# IceCream distributed compiling support
#
# Stages directories with symlinks from gcc/g++ to icecc, for both
# native and cross compilers. Depending on each configure or compile,
# the directories are added at the head of the PATH list and ICECC_CXX
# and ICECC_CC are set.
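#
# For example, for a cross build the staged directory might contain symlinks like
# the following (paths are purely illustrative):
#   ${STAGING_BINDIR}/ice/arm-poky-linux-gnueabi-gcc -> /usr/bin/icecc
#   ${STAGING_BINDIR}/ice/arm-poky-linux-gnueabi-g++ -> /usr/bin/icecc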
#
# For the cross compiler, creates a tar.gz of our toolchain and sets
# ICECC_VERSION accordingly.
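# The archive ends up under ${STAGING_DIR_NATIVE}${prefix_native}/ice/ and is named
# <HOST_PREFIX><DISTRO>-<TARGET_SYS>-<TARGET_FPU>[-kernel]-<gcc version>-<hostname>.tar.gz
# (see icc_version() below; the @VERSION@ placeholder is filled in later by set_icecc_env).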
#
# The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target),
# creating the necessary environment tar.gz file to be used by the remote machines.
# It also supports meta-toolchain generation.
#
# If ICECC_PATH is not set in local.conf, the class will try to locate icecc using 'which',
# but this is not guaranteed to succeed.
#
# If ICECC_ENV_EXEC is set in local.conf, it should point to the icecc-create-env script provided
# by the user; otherwise the default one provided by icecc-create-env.bb will be used.
# (NOTE that this is a modified version of the script, *not* the one that comes with icecc.)
#
# The user can specify that certain packages, or packages inheriting a certain class, should not
# use icecc to distribute compile jobs to remote machines but be handled locally, by defining
# ICECC_USER_PACKAGE_BL and ICECC_USER_CLASS_BL with the appropriate values in local.conf.
# In addition, the user can force icecc to be enabled for packages which set an empty
# PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL (see the example snippet below).
#########################################################################################
# Error checking is kept to a minimum, so double check any parameters you pass to the class.
#########################################################################################
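# Example local.conf snippet (all values are purely illustrative; adjust recipe, class
# and path names to your own setup):
#
#   INHERIT += "icecc"
#   ICECC_PATH = "/usr/bin/icecc"
#   ICECC_PARALLEL_MAKE = "-j 16"
#   ICECC_USER_PACKAGE_BL = "qemu-native"
#   ICECC_USER_CLASS_BL = "image"
#   ICECC_USER_PACKAGE_WL = "busybox"
#
# Setting ICECC_DISABLED to any non-empty value turns icecc off entirely.
#
# The variables below are excluded from the base task hash so that changing them in
# local.conf does not invalidate existing task signatures.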
BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
def icecc_dep_prepend(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
if not d.getVar('INHIBIT_DEFAULT_DEPS'):
return "icecc-create-env-native"
return ""
DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
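# Extract the bare compiler command from KERNEL_CC, evaluating any embedded shell
# expressions and stripping a leading 'ccache' and any trailing arguments.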
def get_cross_kernel_cc(bb,d):
kernel_cc = d.getVar('KERNEL_CC')
# evaluate the expression by the shell if necessary
if '`' in kernel_cc or '$(' in kernel_cc:
kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1]
kernel_cc = d.expand(kernel_cc)
kernel_cc = kernel_cc.replace('ccache', '').strip()
kernel_cc = kernel_cc.split(' ')[0]
kernel_cc = kernel_cc.strip()
return kernel_cc
def get_icecc(d):
return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc")
def create_path(compilers, bb, d):
"""
Create Symlinks for the icecc in the staging directory
"""
staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
if icc_is_kernel(bb, d):
staging += "-kernel"
    # check if the icecc path is set by the user
icecc = get_icecc(d)
# Create the dir if necessary
try:
os.stat(staging)
except:
try:
os.makedirs(staging)
except:
pass
for compiler in compilers:
gcc_path = os.path.join(staging, compiler)
try:
os.stat(gcc_path)
except:
try:
os.symlink(icecc, gcc_path)
except:
pass
return staging
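# Decide whether icecc should be used for the current recipe: returns the string
# "yes" or "no" based on the class/package blacklists, the package whitelist and
# PARALLEL_MAKE.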
def use_icc(bb,d):
package_tmp = d.expand('${PN}')
system_class_blacklist = [ "none" ]
user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
package_class_blacklist = system_class_blacklist + user_class_blacklist
for black in package_class_blacklist:
if bb.data.inherits_class(black, d):
#bb.note(package_tmp, ' class ', black, ' found in blacklist, disable icecc')
return "no"
#"system" package blacklist contains a list of packages that can not distribute compile tasks
#for one reason or the other
system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
package_blacklist = system_package_blacklist + user_package_blacklist
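    # note: entries are matched as substrings of PN, so e.g. "gcc" also covers
    # "gcc-cross" and "gcc-runtime"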
for black in package_blacklist:
if black in package_tmp:
#bb.note(package_tmp, ' found in blacklist, disable icecc')
return "no"
for white in user_package_whitelist:
if white in package_tmp:
            bb.debug(1, package_tmp, " ", d.expand('${PV}'), " found in whitelist, enable icecc")
return "yes"
if d.getVar('PARALLEL_MAKE') == "":
bb.debug(1, package_tmp, " ", d.expand('${PV}'), " has empty PARALLEL_MAKE, disable icecc")
return "no"
return "yes"
def icc_is_kernel(bb, d):
    return bb.data.inherits_class("kernel", d)
def icc_is_native(bb, d):
    return bb.data.inherits_class("cross", d) or \
           bb.data.inherits_class("native", d)
def icc_version(bb, d):
if use_icc(bb, d) == "no":
return ""
parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
if not d.getVar('PARALLEL_MAKE') == "":
d.setVar("PARALLEL_MAKE", parallel)
if icc_is_native(bb, d):
archive_name = "local-host-env"
elif d.expand('${HOST_PREFIX}') == "":
bb.fatal(d.expand("${PN}"), " NULL prefix")
else:
prefix = d.expand('${HOST_PREFIX}' )
distro = d.expand('${DISTRO}')
target_sys = d.expand('${TARGET_SYS}')
float = d.getVar('TARGET_FPU') or "hard"
archive_name = prefix + distro + "-" + target_sys + "-" + float
if icc_is_kernel(bb, d):
archive_name += "-kernel"
import socket
ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
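    # @VERSION@ is replaced with the real compiler version later by set_icecc_env()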
tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
return tar_file
def icc_path(bb,d):
if icc_is_kernel(bb, d):
return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
else:
prefix = d.expand('${HOST_PREFIX}')
return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
def icc_get_external_tool(bb, d, tool):
external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
target_prefix = d.expand('${TARGET_PREFIX}')
return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
def icc_get_tool(bb, d, tool):
if icc_is_native(bb, d):
return bb.utils.which(os.getenv("PATH"), tool)
elif icc_is_kernel(bb, d):
return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
else:
ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
target_sys = d.expand('${TARGET_SYS}')
tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
if os.path.isfile(tool_bin):
return tool_bin
else:
external_tool_bin = icc_get_external_tool(bb, d, tool)
if os.path.isfile(external_tool_bin):
return external_tool_bin
else:
return ""
def icc_get_and_check_tool(bb, d, tool):
# Check that g++ or gcc is not a symbolic link to icecc binary in
# PATH or icecc-create-env script will silently create an invalid
# compiler environment package.
t = icc_get_tool(bb, d, tool)
if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
return ""
else:
return t
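# Poll once per second until the given file exists; give up and return 1 after the
# given timeout (in seconds).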
wait_for_file() {
local TIME_ELAPSED=0
local FILE_TO_TEST=$1
local TIMEOUT=$2
until [ -f "$FILE_TO_TEST" ]
do
TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
if [ $TIME_ELAPSED -gt $TIMEOUT ]
then
return 1
fi
sleep 1
done
}
def set_icecc_env():
# dummy python version of set_icecc_env
return
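# Shell version: stages the compiler symlinks, builds (or reuses) the ICECC_VERSION
# environment tarball and exports ICECC_VERSION, ICECC_CC, ICECC_CXX and PATH for
# the current task.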
set_icecc_env() {
if [ "x${ICECC_DISABLED}" != "x" ]
then
return
fi
ICECC_VERSION="${@icc_version(bb, d)}"
if [ "x${ICECC_VERSION}" = "x" ]
then
bbwarn "Cannot use icecc: could not get ICECC_VERSION"
return
fi
ICE_PATH="${@icc_path(bb, d)}"
if [ "x${ICE_PATH}" = "x" ]
then
bbwarn "Cannot use icecc: could not get ICE_PATH"
return
fi
ICECC_CC="${@icc_get_and_check_tool(bb, d, "gcc")}"
ICECC_CXX="${@icc_get_and_check_tool(bb, d, "g++")}"
if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
then
bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
return
fi
ICE_VERSION=`$ICECC_CC -dumpversion`
ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
return
fi
ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
if [ "`dirname "${ICECC_AS}"`" = "." ]
then
ICECC_AS="`which as`"
fi
if [ ! -f "${ICECC_VERSION}.done" ]
then
mkdir -p "`dirname "${ICECC_VERSION}"`"
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
if flock -n "${ICECC_VERSION}.lock" \
${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
then
touch "${ICECC_VERSION}.done"
        # if taking the lock failed, another task is already generating the environment,
        # so just wait for ${ICECC_VERSION}.done to appear instead
        elif ! wait_for_file "${ICECC_VERSION}.done" 30
        then
            bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
return
fi
fi
export ICECC_VERSION ICECC_CC ICECC_CXX
export PATH="$ICE_PATH:$PATH"
export CCACHE_PATH="$PATH"
bbnote "Using icecc"
}
do_configure_prepend() {
set_icecc_env
}
do_compile_prepend() {
set_icecc_env
}
do_compile_kernelmodules_prepend() {
set_icecc_env
}
do_install_prepend() {
set_icecc_env
}