Diffstat (limited to 'scripts/lib')
-rw-r--r--scripts/lib/argparse_oe.py129
-rw-r--r--scripts/lib/bsp/__init__.py22
-rw-r--r--scripts/lib/bsp/engine.py1947
-rw-r--r--scripts/lib/bsp/help.py1046
-rw-r--r--scripts/lib/bsp/kernel.py1072
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/.gitignore0
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf102
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf34
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend2
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-non_hardware.cfg31
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-preempt-rt.scc15
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc15
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.cfg321
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.scc8
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-dev.bbappend25
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/README118
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/README.sources17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore0
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf10
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine/machconfig5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend2
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall26
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.bb57
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/defconfig5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-config.cfg9
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-patches.scc9
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.cfg4
-rw-r--r--scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.scc18
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf77
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend2
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-preempt-rt.scc17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg55
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.scc21
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-dev.bbappend25
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/README64
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf10
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall14
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.bbappend9
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version/example.patch12
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb23
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/example.patch12
-rw-r--r--scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/helloworld.c8
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/.gitignore0
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf39
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-preempt-rt.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.cfg2
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.scc8
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-dev.bbappend25
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/.gitignore0
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf39
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-preempt-rt.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.cfg66
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.scc8
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-dev.bbappend25
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore0
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf87
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-preempt-rt.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.cfg164
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.scc10
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-dev.bbappend25
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf74
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine/interfaces5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf77
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-preempt-rt.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc20
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.scc5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-dev.bbappend55
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend62
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend62
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend61
-rw-r--r--scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend61
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore0
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/machine.conf65
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend2
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files.noinstall1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-preempt-rt.scc17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc17
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-tiny.scc11
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-config.cfg1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-features.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-patches.scc1
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.cfg48
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.scc14
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall5
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-dev.bbappend25
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend33
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend32
-rw-r--r--scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend32
-rw-r--r--scripts/lib/bsp/tags.py49
-rw-r--r--scripts/lib/devtool/__init__.py257
-rw-r--r--scripts/lib/devtool/build.py86
-rw-r--r--scripts/lib/devtool/build_image.py168
-rw-r--r--scripts/lib/devtool/build_sdk.py65
-rw-r--r--scripts/lib/devtool/deploy.py304
-rw-r--r--scripts/lib/devtool/package.py62
-rw-r--r--scripts/lib/devtool/runqemu.py65
-rw-r--r--scripts/lib/devtool/sdk.py366
-rw-r--r--scripts/lib/devtool/search.py88
-rw-r--r--scripts/lib/devtool/standard.py1452
-rw-r--r--scripts/lib/devtool/upgrade.py382
-rw-r--r--scripts/lib/devtool/utilcmds.py233
-rw-r--r--scripts/lib/recipetool/__init__.py0
-rw-r--r--scripts/lib/recipetool/append.py471
-rw-r--r--scripts/lib/recipetool/create.py963
-rw-r--r--scripts/lib/recipetool/create_buildsys.py859
-rw-r--r--scripts/lib/recipetool/create_buildsys_python.py719
-rw-r--r--scripts/lib/recipetool/create_kernel.py99
-rw-r--r--scripts/lib/recipetool/create_kmod.py152
-rw-r--r--scripts/lib/recipetool/create_npm.py156
-rw-r--r--scripts/lib/recipetool/newappend.py112
-rw-r--r--scripts/lib/recipetool/setvar.py75
-rw-r--r--scripts/lib/scriptpath.py42
-rw-r--r--scripts/lib/scriptutils.py118
-rw-r--r--scripts/lib/wic/__init__.py4
-rw-r--r--scripts/lib/wic/__version__.py1
-rw-r--r--scripts/lib/wic/canned-wks/common.wks.inc3
-rw-r--r--scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg11
-rw-r--r--scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks8
-rw-r--r--scripts/lib/wic/canned-wks/directdisk-gpt.wks10
-rw-r--r--scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks23
-rw-r--r--scripts/lib/wic/canned-wks/directdisk.wks8
-rw-r--r--scripts/lib/wic/canned-wks/mkefidisk.wks11
-rw-r--r--scripts/lib/wic/canned-wks/mkgummidisk.wks11
-rw-r--r--scripts/lib/wic/canned-wks/mkhybridiso.wks7
-rw-r--r--scripts/lib/wic/canned-wks/qemux86-directdisk.wks8
-rw-r--r--scripts/lib/wic/canned-wks/sdimage-bootpart.wks6
-rw-r--r--scripts/lib/wic/conf.py103
-rw-r--r--scripts/lib/wic/config/wic.conf6
-rw-r--r--scripts/lib/wic/creator.py124
-rw-r--r--scripts/lib/wic/engine.py220
-rw-r--r--scripts/lib/wic/help.py777
-rw-r--r--scripts/lib/wic/imager/__init__.py0
-rw-r--r--scripts/lib/wic/imager/baseimager.py192
-rw-r--r--scripts/lib/wic/imager/direct.py380
-rw-r--r--scripts/lib/wic/ksparser.py169
-rw-r--r--scripts/lib/wic/msger.py309
-rw-r--r--scripts/lib/wic/partition.py414
-rw-r--r--scripts/lib/wic/plugin.py150
-rw-r--r--scripts/lib/wic/pluginbase.py108
-rw-r--r--scripts/lib/wic/plugins/imager/direct_plugin.py102
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-efi.py237
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-partition.py140
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-pcbios.py210
-rw-r--r--scripts/lib/wic/plugins/source/fsimage.py73
-rw-r--r--scripts/lib/wic/plugins/source/isoimage-isohybrid.py550
-rw-r--r--scripts/lib/wic/plugins/source/rawcopy.py88
-rw-r--r--scripts/lib/wic/plugins/source/rootfs.py83
-rw-r--r--scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py177
-rw-r--r--scripts/lib/wic/test1
-rw-r--r--scripts/lib/wic/utils/__init__.py0
-rw-r--r--scripts/lib/wic/utils/errors.py29
-rw-r--r--scripts/lib/wic/utils/fs_related.py84
-rw-r--r--scripts/lib/wic/utils/misc.py95
-rw-r--r--scripts/lib/wic/utils/oe/__init__.py22
-rw-r--r--scripts/lib/wic/utils/oe/misc.py250
-rw-r--r--scripts/lib/wic/utils/partitionedfs.py360
-rw-r--r--scripts/lib/wic/utils/runner.py110
-rw-r--r--scripts/lib/wic/utils/syslinux.py58
237 files changed, 20705 insertions, 0 deletions
diff --git a/scripts/lib/argparse_oe.py b/scripts/lib/argparse_oe.py
new file mode 100644
index 0000000..bf3ebad
--- /dev/null
+++ b/scripts/lib/argparse_oe.py
@@ -0,0 +1,129 @@
+import sys
+import argparse
+from collections import defaultdict, OrderedDict
+
+class ArgumentUsageError(Exception):
+ """Exception class you can raise (and catch) in order to show the help"""
+ def __init__(self, message, subcommand=None):
+ self.message = message
+ self.subcommand = subcommand
+
+class ArgumentParser(argparse.ArgumentParser):
+ """Our own version of argparse's ArgumentParser"""
+ def __init__(self, *args, **kwargs):
+ kwargs.setdefault('formatter_class', OeHelpFormatter)
+ self._subparser_groups = OrderedDict()
+ super(ArgumentParser, self).__init__(*args, **kwargs)
+
+ def error(self, message):
+ sys.stderr.write('ERROR: %s\n' % message)
+ self.print_help()
+ sys.exit(2)
+
+ def error_subcommand(self, message, subcommand):
+ if subcommand:
+ for action in self._actions:
+ if isinstance(action, argparse._SubParsersAction):
+ for choice, subparser in action.choices.items():
+ if choice == subcommand:
+ subparser.error(message)
+ return
+ self.error(message)
+
+ def add_subparsers(self, *args, **kwargs):
+ ret = super(ArgumentParser, self).add_subparsers(*args, **kwargs)
+ # Need a way of accessing the parent parser
+ ret._parent_parser = self
+ # Ensure our class gets instantiated
+ ret._parser_class = ArgumentSubParser
+ # Hacky way of adding a method to the subparsers object
+ ret.add_subparser_group = self.add_subparser_group
+ return ret
+
+ def add_subparser_group(self, groupname, groupdesc, order=0):
+ self._subparser_groups[groupname] = (groupdesc, order)
+
+
+class ArgumentSubParser(ArgumentParser):
+ def __init__(self, *args, **kwargs):
+ if 'group' in kwargs:
+ self._group = kwargs.pop('group')
+ if 'order' in kwargs:
+ self._order = kwargs.pop('order')
+ super(ArgumentSubParser, self).__init__(*args, **kwargs)
+ for agroup in self._action_groups:
+ if agroup.title == 'optional arguments':
+ agroup.title = 'options'
+ break
+
+ def parse_known_args(self, args=None, namespace=None):
+ # This works around argparse not handling optional positional arguments being
+ # intermixed with other options. A pretty horrible hack, but we're not left
+ # with much choice given that the bug in argparse exists and it's difficult
+ # to subclass.
+ # Borrowed from http://stackoverflow.com/questions/20165843/argparse-how-to-handle-variable-number-of-arguments-nargs
+ # with an extra workaround (in format_help() below) for the positional
+ # arguments disappearing from the --help output, as well as structural tweaks.
+ # Originally simplified from http://bugs.python.org/file30204/test_intermixed.py
+ positionals = self._get_positional_actions()
+ for action in positionals:
+ # deactivate positionals
+ action.save_nargs = action.nargs
+ action.nargs = 0
+
+ namespace, remaining_args = super(ArgumentSubParser, self).parse_known_args(args, namespace)
+ for action in positionals:
+ # remove the empty positional values from namespace
+ if hasattr(namespace, action.dest):
+ delattr(namespace, action.dest)
+ for action in positionals:
+ action.nargs = action.save_nargs
+ # parse positionals
+ namespace, extras = super(ArgumentSubParser, self).parse_known_args(remaining_args, namespace)
+ return namespace, extras
+
+ def format_help(self):
+ # Quick, restore the positionals!
+ positionals = self._get_positional_actions()
+ for action in positionals:
+ if hasattr(action, 'save_nargs'):
+ action.nargs = action.save_nargs
+ return super(ArgumentParser, self).format_help()
+
+
+class OeHelpFormatter(argparse.HelpFormatter):
+ def _format_action(self, action):
+ if hasattr(action, '_get_subactions'):
+ # subcommands list
+ groupmap = defaultdict(list)
+ ordermap = {}
+ subparser_groups = action._parent_parser._subparser_groups
+ groups = sorted(subparser_groups.keys(), key=lambda item: subparser_groups[item][1], reverse=True)
+ for subaction in self._iter_indented_subactions(action):
+ parser = action._name_parser_map[subaction.dest]
+ group = getattr(parser, '_group', None)
+ groupmap[group].append(subaction)
+ if group not in groups:
+ groups.append(group)
+ order = getattr(parser, '_order', 0)
+ ordermap[subaction.dest] = order
+
+ lines = []
+ if len(groupmap) > 1:
+ groupindent = ' '
+ else:
+ groupindent = ''
+ for group in groups:
+ subactions = groupmap[group]
+ if not subactions:
+ continue
+ if groupindent:
+ if not group:
+ group = 'other'
+ groupdesc = subparser_groups.get(group, (group, 0))[0]
+ lines.append(' %s:' % groupdesc)
+ for subaction in sorted(subactions, key=lambda item: ordermap[item.dest], reverse=True):
+ lines.append('%s%s' % (groupindent, self._format_action(subaction).rstrip()))
+ return '\n'.join(lines)
+ else:
+ return super(OeHelpFormatter, self)._format_action(action)
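A minimal usage sketch of the module above (hypothetical tool and subcommand names; it relies on argparse's add_parser() forwarding the extra 'group' and 'order' keywords to the ArgumentSubParser constructor, which pops them in __init__):

    import argparse_oe   # assumes scripts/lib is on sys.path

    parser = argparse_oe.ArgumentParser(description='example tool')
    subparsers = parser.add_subparsers(dest='subcommand', metavar='<subcommand>')
    # higher 'order' values sort earlier in the --help output
    subparsers.add_subparser_group('working', 'Working with things', order=10)
    parser_do = subparsers.add_parser('do-thing', help='Do a thing',
                                      group='working', order=5)
    parser_do.add_argument('name', help='thing to do')
    args = parser.parse_args()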
diff --git a/scripts/lib/bsp/__init__.py b/scripts/lib/bsp/__init__.py
new file mode 100644
index 0000000..8bbb6e1
--- /dev/null
+++ b/scripts/lib/bsp/__init__.py
@@ -0,0 +1,22 @@
+#
+# Yocto BSP tools library
+#
+# Copyright (c) 2012, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] intel.com>
+#
diff --git a/scripts/lib/bsp/engine.py b/scripts/lib/bsp/engine.py
new file mode 100644
index 0000000..66e2162
--- /dev/null
+++ b/scripts/lib/bsp/engine.py
@@ -0,0 +1,1947 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2012, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module implements the templating engine used by 'yocto-bsp' to
+# create BSPs. The BSP templates are simply the set of files expected
+# to appear in a generated BSP, marked up with a small set of tags
+# used to customize the output. The engine parses through the
+# templates and generates a Python program containing all the logic
+# and input elements needed to display and retrieve BSP-specific
+# information from the user. The resulting program uses those results
+# to generate the final BSP files.
+#
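+# For illustration (a sketch, not text from an actual template), a
+# template line can mix literal text with an assignment tag, e.g.
+#
+#     SOME_VARIABLE = "prefix-{{=machine}}-suffix"
+#
+# while an input tag occupies a whole line and carries name:value
+# properties that expand_input_tag() below parses into a dictionary, e.g.
+#
+#     {{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+#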
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] intel.com>
+#
+
+import os
+import sys
+from abc import ABCMeta, abstractmethod
+from tags import *
+import shlex
+import json
+import subprocess
+import shutil
+
+class Line():
+ """
+ Generic (abstract) container representing a line that will appear
+ in the BSP-generating program.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, line):
+ self.line = line
+ self.generated_line = ""
+ self.prio = sys.maxint
+ self.discard = False
+
+ @abstractmethod
+ def gen(self, context = None):
+ """
+ Generate the final executable line that will appear in the
+ BSP-generation program.
+ """
+ pass
+
+ def escape(self, line):
+ """
+ Escape single and double quotes and backslashes until I find
+ something better (re.escape() escapes way too much).
+ """
+ return line.replace("\\", "\\\\").replace("\"", "\\\"").replace("'", "\\'")
+
+ def parse_error(self, msg, lineno, line):
+ raise SyntaxError("%s: %s" % (msg, line))
+
+
+class NormalLine(Line):
+ """
+ Container for normal (non-tag) lines.
+ """
+ def __init__(self, line):
+ Line.__init__(self, line)
+ self.is_filename = False
+ self.is_dirname = False
+ self.out_filebase = None
+
+ def gen(self, context = None):
+ if self.is_filename:
+ line = "current_file = \"" + os.path.join(self.out_filebase, self.escape(self.line)) + "\"; of = open(current_file, \"w\")"
+ elif self.is_dirname:
+ dirname = os.path.join(self.out_filebase, self.escape(self.line))
+ line = "if not os.path.exists(\"" + dirname + "\"): os.mkdir(\"" + dirname + "\")"
+ else:
+ line = "of.write(\"" + self.escape(self.line) + "\\n\")"
+ return line
+
+
+class CodeLine(Line):
+ """
+ Container for Python code tag lines.
+ """
+ def __init__(self, line):
+ Line.__init__(self, line)
+
+ def gen(self, context = None):
+ return self.line
+
+
+class Assignment:
+ """
+ Representation of everything we know about {{=name }} tags.
+ Instances of these are used by Assignment lines.
+ """
+ def __init__(self, start, end, name):
+ self.start = start
+ self.end = end
+ self.name = name
+
+
+class AssignmentLine(NormalLine):
+ """
+ Container for normal lines containing assignment tags. Assignment
+ tags must be in ascending order of 'start' value.
+ """
+ def __init__(self, line):
+ NormalLine.__init__(self, line)
+ self.assignments = []
+
+ def add_assignment(self, start, end, name):
+ self.assignments.append(Assignment(start, end, name))
+
+ def gen(self, context = None):
+ line = self.escape(self.line)
+
+ for assignment in self.assignments:
+ replacement = "\" + " + assignment.name + " + \""
+ idx = line.find(ASSIGN_TAG)
+ line = line[:idx] + replacement + line[idx + assignment.end - assignment.start:]
+ if self.is_filename:
+ return "current_file = \"" + os.path.join(self.out_filebase, line) + "\"; of = open(current_file, \"w\")"
+ elif self.is_dirname:
+ dirname = os.path.join(self.out_filebase, line)
+ return "if not os.path.exists(\"" + dirname + "\"): os.mkdir(\"" + dirname + "\")"
+ else:
+ return "of.write(\"" + line + "\\n\")"
+
+
+class InputLine(Line):
+ """
+ Base class for Input lines.
+ """
+ def __init__(self, props, tag, lineno):
+ Line.__init__(self, tag)
+ self.props = props
+ self.lineno = lineno
+
+ try:
+ self.prio = int(props["prio"])
+ except KeyError:
+ self.prio = sys.maxint
+
+ def gen(self, context = None):
+ try:
+ depends_on = self.props["depends-on"]
+ try:
+ depends_on_val = self.props["depends-on-val"]
+ except KeyError:
+ self.parse_error("No 'depends-on-val' for 'depends-on' property",
+ self.lineno, self.line)
+ except KeyError:
+ pass
+
+
+class EditBoxInputLine(InputLine):
+ """
+ Base class for 'editbox' Input lines.
+
+ props:
+ name: example - "Load address"
+ msg: example - "Please enter the load address"
+ result:
+ Sets the value of the variable specified by 'name' to
+ whatever the user typed.
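+    generated line (illustrative, assuming name:"load_addr" and
+    default:"0x80000000"):
+        load_addr = default(raw_input("Please enter the load address [default: 0x80000000] "), load_addr)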
+ """
+ def __init__(self, props, tag, lineno):
+ InputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ InputLine.gen(self, context)
+ name = self.props["name"]
+ if not name:
+ self.parse_error("No input 'name' property found",
+ self.lineno, self.line)
+ msg = self.props["msg"]
+ if not msg:
+ self.parse_error("No input 'msg' property found",
+ self.lineno, self.line)
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ msg += " [default: " + default_choice + "]"
+
+ line = name + " = default(raw_input(\"" + msg + " \"), " + name + ")"
+
+ return line
+
+
+class GitRepoEditBoxInputLine(EditBoxInputLine):
+ """
+ Base class for 'editbox' Input lines for user input of remote git
+ repos. This class verifies the existence and connectivity of the
+ specified git repo.
+
+ props:
+ name: example - "Load address"
+ msg: example - "Please enter the load address"
+ result:
+ Sets the value of the variable specified by 'name' to
+ whatever the user typed.
+ """
+ def __init__(self, props, tag, lineno):
+ EditBoxInputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ EditBoxInputLine.gen(self, context)
+ name = self.props["name"]
+ if not name:
+ self.parse_error("No input 'name' property found",
+ self.lineno, self.line)
+ msg = self.props["msg"]
+ if not msg:
+ self.parse_error("No input 'msg' property found",
+ self.lineno, self.line)
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ msg += " [default: " + default_choice + "]"
+
+ line = name + " = get_verified_git_repo(\"" + msg + "\"," + name + ")"
+
+ return line
+
+
+class FileEditBoxInputLine(EditBoxInputLine):
+ """
+ Base class for 'editbox' Input lines for user input of existing
+ files. This class verifies the existence of the specified file.
+
+ props:
+ name: example - "Load address"
+ msg: example - "Please enter the load address"
+ result:
+ Sets the value of the variable specified by 'name' to
+ whatever the user typed.
+ """
+ def __init__(self, props, tag, lineno):
+ EditBoxInputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ EditBoxInputLine.gen(self, context)
+ name = self.props["name"]
+ if not name:
+ self.parse_error("No input 'name' property found",
+ self.lineno, self.line)
+ msg = self.props["msg"]
+ if not msg:
+ self.parse_error("No input 'msg' property found",
+ self.lineno, self.line)
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ msg += " [default: " + default_choice + "]"
+
+ line = name + " = get_verified_file(\"" + msg + "\"," + name + ", True)"
+
+ return line
+
+
+class BooleanInputLine(InputLine):
+ """
+ Base class for boolean Input lines.
+ props:
+ name: example - "keyboard"
+ msg: example - "Got keyboard?"
+ result:
+ Sets the value of the variable specified by 'name' to "yes" or "no"
+ example - keyboard = "yes"
+ """
+ def __init__(self, props, tag, lineno):
+ InputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ InputLine.gen(self, context)
+ name = self.props["name"]
+ if not name:
+ self.parse_error("No input 'name' property found",
+ self.lineno, self.line)
+ msg = self.props["msg"]
+ if not msg:
+ self.parse_error("No input 'msg' property found",
+ self.lineno, self.line)
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ msg += " [default: " + default_choice + "]"
+
+ line = name + " = boolean(raw_input(\"" + msg + " \"), " + name + ")"
+
+ return line
+
+
+class ListInputLine(InputLine):
+ """
+ Base class for List-based Input lines. e.g. Choicelist, Checklist.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, props, tag, lineno):
+ InputLine.__init__(self, props, tag, lineno)
+ self.choices = []
+
+ def gen_choicepair_list(self):
+ """Generate a list of 2-item val:desc lists from self.choices."""
+ if not self.choices:
+ return None
+
+ choicepair_list = list()
+
+ for choice in self.choices:
+ choicepair = []
+ choicepair.append(choice.val)
+ choicepair.append(choice.desc)
+ choicepair_list.append(choicepair)
+
+ return choicepair_list
+
+ def gen_degenerate_choicepair_list(self, choices):
+ """Generate a list of 2-item val:desc with val=desc from passed-in choices."""
+ choicepair_list = list()
+
+ for choice in choices:
+ choicepair = []
+ choicepair.append(choice)
+ choicepair.append(choice)
+ choicepair_list.append(choicepair)
+
+ return choicepair_list
+
+ def exec_listgen_fn(self, context = None):
+ """
+ Execute the list-generating function contained as a string in
+ the "gen" property.
+ """
+ retval = None
+ try:
+ fname = self.props["gen"]
+ modsplit = fname.split('.')
+ mod_fn = modsplit.pop()
+ mod = '.'.join(modsplit)
+
+ __import__(mod)
+ # python 2.7 has a better way to do this using importlib.import_module
+ m = sys.modules[mod]
+
+ fn = getattr(m, mod_fn)
+ if not fn:
+ self.parse_error("couldn't load function specified for 'gen' property ",
+ self.lineno, self.line)
+ retval = fn(context)
+ if not retval:
+ self.parse_error("function specified for 'gen' property returned nothing ",
+ self.lineno, self.line)
+ except KeyError:
+ pass
+
+ return retval
+
+ def gen_choices_str(self, choicepairs):
+ """
+ Generate a numbered list of choices from a list of choicepairs
+ for display to the user.
+ """
+ choices_str = ""
+
+ for i, choicepair in enumerate(choicepairs):
+ choices_str += "\t" + str(i + 1) + ") " + choicepair[1] + "\n"
+
+ return choices_str
+
+ def gen_choices_val_str(self, choicepairs):
+ """
+ Generate an array of choice values corresponding to the
+ numbered list generated by gen_choices_str().
+ """
+ choices_val_list = "["
+
+ for i, choicepair in enumerate(choicepairs):
+ choices_val_list += "\"" + choicepair[0] + "\","
+ choices_val_list += "]"
+
+ return choices_val_list
+
+ def gen_choices_val_list(self, choicepairs):
+ """
+ Generate an array of choice values corresponding to the
+ numbered list generated by gen_choices_str().
+ """
+ choices_val_list = []
+
+ for i, choicepair in enumerate(choicepairs):
+ choices_val_list.append(choicepair[0])
+
+ return choices_val_list
+
+ def gen_choices_list(self, context = None, checklist = False):
+ """
+        Generate the list of [val, desc] choicepairs, either via the
+        list-generating function named by the 'gen' property or from
+        the child 'choice'/'check' items gathered in self.choices.
+ """
+ choices = self.exec_listgen_fn(context)
+ if choices:
+ if len(choices) == 0:
+ self.parse_error("No entries available for input list",
+ self.lineno, self.line)
+ choicepairs = self.gen_degenerate_choicepair_list(choices)
+ else:
+ if len(self.choices) == 0:
+ self.parse_error("No entries available for input list",
+ self.lineno, self.line)
+ choicepairs = self.gen_choicepair_list()
+
+ return choicepairs
+
+ def gen_choices(self, context = None, checklist = False):
+ """
+ Generate an array of choice values corresponding to the
+ numbered list generated by gen_choices_str(), display it to
+ the user, and process the result.
+ """
+ msg = self.props["msg"]
+ if not msg:
+ self.parse_error("No input 'msg' property found",
+ self.lineno, self.line)
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ msg += " [default: " + default_choice + "]"
+
+ choicepairs = self.gen_choices_list(context, checklist)
+
+ choices_str = self.gen_choices_str(choicepairs)
+ choices_val_list = self.gen_choices_val_list(choicepairs)
+ if checklist:
+ choiceval = default(find_choicevals(raw_input(msg + "\n" + choices_str), choices_val_list), default_choice)
+ else:
+ choiceval = default(find_choiceval(raw_input(msg + "\n" + choices_str), choices_val_list), default_choice)
+
+ return choiceval
+
+
+def find_choiceval(choice_str, choice_list):
+ """
+ Take number as string and return val string from choice_list,
+ empty string if oob. choice_list is a simple python list.
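+    e.g. find_choiceval("2", ["standard", "tiny"]) -> "tiny"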
+ """
+ choice_val = ""
+
+ try:
+ choice_idx = int(choice_str)
+ if choice_idx <= len(choice_list):
+ choice_idx -= 1
+ choice_val = choice_list[choice_idx]
+ except ValueError:
+ pass
+
+ return choice_val
+
+
+def find_choicevals(choice_str, choice_list):
+ """
+ Take numbers as space-separated string and return vals list from
+ choice_list, empty list if oob. choice_list is a simple python
+ list.
+ """
+ choice_vals = []
+
+ choices = choice_str.split()
+ for choice in choices:
+ choice_vals.append(find_choiceval(choice, choice_list))
+
+ return choice_vals
+
+
+def default(input_str, name):
+ """
+ Return default if no input_str, otherwise stripped input_str.
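+    e.g. default("", "standard") -> "standard"; default("  4.4\n", "standard") -> "4.4"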
+ """
+ if not input_str:
+ return name
+
+ return input_str.strip()
+
+
+def verify_git_repo(giturl):
+ """
+ Verify that the giturl passed in can be connected to. This can be
+ used as a check for the existence of the given repo and/or basic
+ git remote connectivity.
+
+    Returns True if the connection was successful, False otherwise.
+ """
+ if not giturl:
+ return False
+
+ gitcmd = "git ls-remote %s > /dev/null 2>&1" % (giturl)
+ rc = subprocess.call(gitcmd, shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
+
+def get_verified_git_repo(input_str, name):
+ """
+ Return git repo if verified, otherwise loop forever asking user
+ for filename.
+ """
+ msg = input_str.strip() + " "
+
+ giturl = default(raw_input(msg), name)
+
+ while True:
+ if verify_git_repo(giturl):
+ return giturl
+ giturl = default(raw_input(msg), name)
+
+
+def get_verified_file(input_str, name, filename_can_be_null):
+ """
+ Return filename if the file exists, otherwise loop forever asking
+ user for filename.
+ """
+ msg = input_str.strip() + " "
+
+ filename = default(raw_input(msg), name)
+
+ while True:
+ if not filename and filename_can_be_null:
+ return filename
+ if os.path.isfile(filename):
+ return filename
+ filename = default(raw_input(msg), name)
+
+
+def replace_file(replace_this, with_this):
+ """
+ Replace the given file with the contents of filename, retaining
+ the original filename.
+ """
+ try:
+ replace_this.close()
+ shutil.copy(with_this, replace_this.name)
+ except IOError:
+ pass
+
+
+def boolean(input_str, name):
+ """
+ Return lowercase version of first char in string, or value in name.
+ """
+ if not input_str:
+ return name
+
+ str = input_str.lower().strip()
+    if str and (str[0] == "y" or str[0] == "n"):
+ return str[0]
+ else:
+ return name
+
+
+def strip_base(input_str):
+ """
+ strip '/base' off the end of input_str, so we can use 'base' in
+ the branch names we present to the user.
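+    e.g. strip_base("standard/base") -> "standard"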
+ """
+ if input_str and input_str.endswith("/base"):
+ return input_str[:-len("/base")]
+ return input_str.strip()
+
+
+deferred_choices = {}
+
+def gen_choices_defer(input_line, context, checklist = False):
+ """
+    Save the context, hashed by the name of the input item, so it can be
+    passed to the gen function later.
+ """
+ name = input_line.props["name"]
+
+ try:
+ nameappend = input_line.props["nameappend"]
+ except KeyError:
+ nameappend = ""
+
+ try:
+ branches_base = input_line.props["branches_base"]
+ except KeyError:
+ branches_base = ""
+
+ filename = input_line.props["filename"]
+
+ closetag_start = filename.find(CLOSE_TAG)
+
+ if closetag_start != -1:
+ filename = filename[closetag_start + len(CLOSE_TAG):]
+
+ filename = filename.strip()
+ filename = os.path.splitext(filename)[0]
+
+ captured_context = capture_context(context)
+ context["filename"] = filename
+ captured_context["filename"] = filename
+ context["nameappend"] = nameappend
+ captured_context["nameappend"] = nameappend
+ context["branches_base"] = branches_base
+ captured_context["branches_base"] = branches_base
+
+ deferred_choice = (input_line, captured_context, checklist)
+ key = name + "_" + filename + "_" + nameappend
+ deferred_choices[key] = deferred_choice
+
+
+def invoke_deferred_choices(name):
+ """
+ Invoke the choice generation function using the context hashed by
+ 'name'.
+ """
+ deferred_choice = deferred_choices[name]
+ input_line = deferred_choice[0]
+ context = deferred_choice[1]
+ checklist = deferred_choice[2]
+
+ context["name"] = name
+
+ choices = input_line.gen_choices(context, checklist)
+
+ return choices
+
+
+class ChoicelistInputLine(ListInputLine):
+ """
+ Base class for choicelist Input lines.
+ props:
+ name: example - "xserver_choice"
+ msg: example - "Please select an xserver for this machine"
+ result:
+ Sets the value of the variable specified by 'name' to whichever Choice was chosen
+ example - xserver_choice = "xserver_vesa"
+ """
+ def __init__(self, props, tag, lineno):
+ ListInputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ InputLine.gen(self, context)
+
+ gen_choices_defer(self, context)
+ name = self.props["name"]
+ nameappend = context["nameappend"]
+ filename = context["filename"]
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ line = name + " = default(invoke_deferred_choices(\"" + name + "_" + filename + "_" + nameappend + "\"), \"" + default_choice + "\")"
+
+ return line
+
+
+class ListValInputLine(InputLine):
+ """
+ Abstract base class for choice and checkbox Input lines.
+ """
+ def __init__(self, props, tag, lineno):
+ InputLine.__init__(self, props, tag, lineno)
+
+ try:
+ self.val = self.props["val"]
+ except KeyError:
+ self.parse_error("No input 'val' property found", self.lineno, self.line)
+
+ try:
+ self.desc = self.props["msg"]
+ except KeyError:
+ self.parse_error("No input 'msg' property found", self.lineno, self.line)
+
+
+class ChoiceInputLine(ListValInputLine):
+ """
+ Base class for choicelist item Input lines.
+ """
+ def __init__(self, props, tag, lineno):
+ ListValInputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ return None
+
+
+class ChecklistInputLine(ListInputLine):
+ """
+ Base class for checklist Input lines.
+ """
+ def __init__(self, props, tag, lineno):
+ ListInputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ InputLine.gen(self, context)
+
+ gen_choices_defer(self, context, True)
+ name = self.props["name"]
+ nameappend = context["nameappend"]
+ filename = context["filename"]
+
+ try:
+ default_choice = self.props["default"]
+ except KeyError:
+ default_choice = ""
+
+ line = name + " = default(invoke_deferred_choices(\"" + name + "_" + filename + "_" + nameappend + "\"), \"" + default_choice + "\")"
+
+ return line
+
+
+class CheckInputLine(ListValInputLine):
+ """
+ Base class for checklist item Input lines.
+ """
+ def __init__(self, props, tag, lineno):
+ ListValInputLine.__init__(self, props, tag, lineno)
+
+ def gen(self, context = None):
+ return None
+
+
+dirname_substitutions = {}
+
+class SubstrateBase(object):
+ """
+ Base class for both expanded and unexpanded file and dir container
+ objects.
+ """
+ def __init__(self, filename, filebase, out_filebase):
+ self.filename = filename
+ self.filebase = filebase
+ self.translated_filename = filename
+ self.out_filebase = out_filebase
+ self.raw_lines = []
+ self.expanded_lines = []
+ self.prev_choicelist = None
+
+ def parse_error(self, msg, lineno, line):
+ raise SyntaxError("%s: [%s: %d]: %s" % (msg, self.filename, lineno, line))
+
+ def expand_input_tag(self, tag, lineno):
+ """
+ Input tags consist of the word 'input' at the beginning,
+ followed by name:value property pairs which are converted into
+ a dictionary.
+ """
+ propstr = tag[len(INPUT_TAG):]
+
+ props = dict(prop.split(":", 1) for prop in shlex.split(propstr))
+ props["filename"] = self.filename
+
+ input_type = props[INPUT_TYPE_PROPERTY]
+ if not props[INPUT_TYPE_PROPERTY]:
+ self.parse_error("No input 'type' property found", lineno, tag)
+
+ if input_type == "boolean":
+ return BooleanInputLine(props, tag, lineno)
+ if input_type == "edit":
+ return EditBoxInputLine(props, tag, lineno)
+ if input_type == "edit-git-repo":
+ return GitRepoEditBoxInputLine(props, tag, lineno)
+ if input_type == "edit-file":
+ return FileEditBoxInputLine(props, tag, lineno)
+ elif input_type == "choicelist":
+ self.prev_choicelist = ChoicelistInputLine(props, tag, lineno)
+ return self.prev_choicelist
+ elif input_type == "choice":
+ if not self.prev_choicelist:
+ self.parse_error("Found 'choice' input tag but no previous choicelist",
+ lineno, tag)
+ choice = ChoiceInputLine(props, tag, lineno)
+ self.prev_choicelist.choices.append(choice)
+ return choice
+ elif input_type == "checklist":
+ return ChecklistInputLine(props, tag, lineno)
+ elif input_type == "check":
+ return CheckInputLine(props, tag, lineno)
+
+ def expand_assignment_tag(self, start, line, lineno):
+ """
+ Expand all tags in a line.
+ """
+ expanded_line = AssignmentLine(line.rstrip())
+
+ while start != -1:
+ end = line.find(CLOSE_TAG, start)
+ if end == -1:
+ self.parse_error("No close tag found for assignment tag", lineno, line)
+ else:
+ name = line[start + len(ASSIGN_TAG):end].strip()
+ expanded_line.add_assignment(start, end + len(CLOSE_TAG), name)
+ start = line.find(ASSIGN_TAG, end)
+
+ return expanded_line
+
+ def expand_tag(self, line, lineno):
+ """
+ Returns a processed tag line, or None if there was no tag
+
+ The rules for tags are very simple:
+ - No nested tags
+ - Tags start with {{ and end with }}
+ - An assign tag, {{=, can appear anywhere and will
+ be replaced with what the assignment evaluates to
+ - Any other tag occupies the whole line it is on
+ - if there's anything else on the tag line, it's an error
+ - if it starts with 'input', it's an input tag and
+ will only be used for prompting and setting variables
+ - anything else is straight Python
+ - tags are in effect only until the next blank line or tag or 'pass' tag
+ - we don't have indentation in tags, but we need some way to end a block
+ forcefully without blank lines or other tags - that's the 'pass' tag
+ - todo: implement pass tag
+ - directories and filenames can have tags as well, but only assignment
+ and 'if' code lines
+ - directories and filenames are the only case where normal tags can
+ coexist with normal text on the same 'line'
+ """
+ start = line.find(ASSIGN_TAG)
+ if start != -1:
+ return self.expand_assignment_tag(start, line, lineno)
+
+ start = line.find(OPEN_TAG)
+ if start == -1:
+ return None
+
+ end = line.find(CLOSE_TAG, 0)
+ if end == -1:
+ self.parse_error("No close tag found for open tag", lineno, line)
+
+ tag = line[start + len(OPEN_TAG):end].strip()
+
+ if not tag.lstrip().startswith(INPUT_TAG):
+ return CodeLine(tag)
+
+ return self.expand_input_tag(tag, lineno)
+
+ def append_translated_filename(self, filename):
+ """
+ Simply append filename to translated_filename
+ """
+ self.translated_filename = os.path.join(self.translated_filename, filename)
+
+ def get_substituted_file_or_dir_name(self, first_line, tag):
+ """
+ If file or dir names contain name substitutions, return the name
+ to substitute. Note that this is just the file or dirname and
+ doesn't include the path.
+ """
+ filename = first_line.find(tag)
+ if filename != -1:
+ filename += len(tag)
+ substituted_filename = first_line[filename:].strip()
+ this = substituted_filename.find(" this")
+ if this != -1:
+ head, tail = os.path.split(self.filename)
+ substituted_filename = substituted_filename[:this + 1] + tail
+ if tag == DIRNAME_TAG: # get rid of .noinstall in dirname
+ substituted_filename = substituted_filename.split('.')[0]
+
+ return substituted_filename
+
+ def get_substituted_filename(self, first_line):
+ """
+ If a filename contains a name substitution, return the name to
+ substitute. Note that this is just the filename and doesn't
+ include the path.
+ """
+ return self.get_substituted_file_or_dir_name(first_line, FILENAME_TAG)
+
+ def get_substituted_dirname(self, first_line):
+ """
+ If a dirname contains a name substitution, return the name to
+ substitute. Note that this is just the dirname and doesn't
+ include the path.
+ """
+ return self.get_substituted_file_or_dir_name(first_line, DIRNAME_TAG)
+
+ def substitute_filename(self, first_line):
+ """
+ Find the filename in first_line and append it to translated_filename.
+ """
+ substituted_filename = self.get_substituted_filename(first_line)
+        self.append_translated_filename(substituted_filename)
+
+ def substitute_dirname(self, first_line):
+ """
+ Find the dirname in first_line and append it to translated_filename.
+ """
+ substituted_dirname = self.get_substituted_dirname(first_line)
+        self.append_translated_filename(substituted_dirname)
+
+ def is_filename_substitution(self, line):
+ """
+        Do we have a filename substitution?
+ """
+ if line.find(FILENAME_TAG) != -1:
+ return True
+ return False
+
+ def is_dirname_substitution(self, line):
+ """
+        Do we have a dirname substitution?
+ """
+ if line.find(DIRNAME_TAG) != -1:
+ return True
+ return False
+
+ def translate_dirname(self, first_line):
+ """
+ Just save the first_line mapped by filename. The later pass
+ through the directories will look for a dirname.noinstall
+ match and grab the substitution line.
+ """
+ dirname_substitutions[self.filename] = first_line
+
+ def translate_dirnames_in_path(self, path):
+ """
+ Translate dirnames below this file or dir, not including tail.
+        dirname_substitutions is keyed on actual untranslated filenames.
+        translated_path contains the substitutions for each element.
+ """
+ remainder = path[len(self.filebase)+1:]
+ translated_path = untranslated_path = self.filebase
+
+ untranslated_dirs = remainder.split(os.sep)
+
+ for dir in untranslated_dirs:
+ key = os.path.join(untranslated_path, dir + '.noinstall')
+ try:
+ first_line = dirname_substitutions[key]
+ except KeyError:
+ translated_path = os.path.join(translated_path, dir)
+ untranslated_path = os.path.join(untranslated_path, dir)
+ continue
+ substituted_dir = self.get_substituted_dirname(first_line)
+ translated_path = os.path.join(translated_path, substituted_dir)
+ untranslated_path = os.path.join(untranslated_path, dir)
+
+ return translated_path
+
+ def translate_file_or_dir_name(self):
+ """
+ Originally we were allowed to use open/close/assign tags and python
+ code in the filename, which fit in nicely with the way we
+ processed the templates and generated code. Now that we can't
+ do that, we make those tags proper file contents and have this
+ pass substitute the nice but non-functional names with those
+ 'strange' ones, and then proceed as usual.
+
+ So, if files or matching dir<.noinstall> files contain
+ filename substitutions, this function translates them into the
+ corresponding 'strange' names, which future passes will expand
+ as they always have. The resulting pathname is kept in the
+ file or directory's translated_filename. Another way to think
+ about it is that self.filename is the input filename, and
+ translated_filename is the output filename before expansion.
+ """
+ # remove leaf file or dirname
+ head, tail = os.path.split(self.filename)
+ translated_path = self.translate_dirnames_in_path(head)
+ self.translated_filename = translated_path
+
+ # This is a dirname - does it have a matching .noinstall with
+        # a substitution? If so, apply the dirname substitution.
+ if not os.path.isfile(self.filename):
+ key = self.filename + ".noinstall"
+ try:
+ first_line = dirname_substitutions[key]
+ except KeyError:
+ self.append_translated_filename(tail)
+ return
+ self.substitute_dirname(first_line)
+ return
+
+ f = open(self.filename)
+ first_line = f.readline()
+ f.close()
+
+ # This is a normal filename not needing translation, just use
+ # it as-is.
+ if not first_line or not first_line.startswith("#"):
+ self.append_translated_filename(tail)
+ return
+
+ # If we have a filename substitution (first line in the file
+ # is a FILENAME_TAG line) do the substitution now. If we have
+ # a dirname substitution (DIRNAME_TAG in dirname.noinstall
+ # meta-file), hash it so we can apply it when we see the
+ # matching dirname later. Otherwise we have a regular
+ # filename, just use it as-is.
+ if self.is_filename_substitution(first_line):
+ self.substitute_filename(first_line)
+ elif self.is_dirname_substitution(first_line):
+ self.translate_dirname(first_line)
+ else:
+ self.append_translated_filename(tail)
+
+ def expand_file_or_dir_name(self):
+ """
+ Expand file or dir names into codeline. Dirnames and
+ filenames can only have assignments or if statements. First
+ translate if statements into CodeLine + (dirname or filename
+ creation).
+ """
+ lineno = 0
+
+ line = self.translated_filename[len(self.filebase):]
+ if line.startswith("/"):
+ line = line[1:]
+ opentag_start = -1
+
+ start = line.find(OPEN_TAG)
+ while start != -1:
+ if not line[start:].startswith(ASSIGN_TAG):
+ opentag_start = start
+ break
+ start += len(ASSIGN_TAG)
+ start = line.find(OPEN_TAG, start)
+
+ if opentag_start != -1:
+ end = line.find(CLOSE_TAG, opentag_start)
+ if end == -1:
+ self.parse_error("No close tag found for open tag", lineno, line)
+ # we have a {{ tag i.e. code
+ tag = line[opentag_start + len(OPEN_TAG):end].strip()
+ if not tag.lstrip().startswith(IF_TAG):
+ self.parse_error("Only 'if' tags are allowed in file or directory names",
+ lineno, line)
+ self.expanded_lines.append(CodeLine(tag))
+
+ # everything after }} is the actual filename (possibly with assignments)
+ # everything before is the pathname
+ line = line[:opentag_start] + line[end + len(CLOSE_TAG):].strip()
+
+ assign_start = line.find(ASSIGN_TAG)
+ if assign_start != -1:
+ assignment_tag = self.expand_assignment_tag(assign_start, line, lineno)
+ if isinstance(self, SubstrateFile):
+ assignment_tag.is_filename = True
+ assignment_tag.out_filebase = self.out_filebase
+ elif isinstance(self, SubstrateDir):
+ assignment_tag.is_dirname = True
+ assignment_tag.out_filebase = self.out_filebase
+ self.expanded_lines.append(assignment_tag)
+ return
+
+ normal_line = NormalLine(line)
+ if isinstance(self, SubstrateFile):
+ normal_line.is_filename = True
+ normal_line.out_filebase = self.out_filebase
+ elif isinstance(self, SubstrateDir):
+ normal_line.is_dirname = True
+ normal_line.out_filebase = self.out_filebase
+ self.expanded_lines.append(normal_line)
+
+ def expand(self):
+ """
+ Expand the file or dir name first, eventually this ends up
+ creating the file or dir.
+ """
+ self.translate_file_or_dir_name()
+ self.expand_file_or_dir_name()
+
+
+class SubstrateFile(SubstrateBase):
+ """
+ Container for both expanded and unexpanded substrate files.
+ """
+ def __init__(self, filename, filebase, out_filebase):
+ SubstrateBase.__init__(self, filename, filebase, out_filebase)
+
+ def read(self):
+ if self.raw_lines:
+ return
+ f = open(self.filename)
+ self.raw_lines = f.readlines()
+ f.close()
+
+ def expand(self):
+ """Expand the contents of all template tags in the file."""
+ SubstrateBase.expand(self)
+ self.read()
+
+ for lineno, line in enumerate(self.raw_lines):
+ # only first line can be a filename substitution
+ if lineno == 0 and line.startswith("#") and FILENAME_TAG in line:
+ continue # skip it - we've already expanded it
+ expanded_line = self.expand_tag(line, lineno + 1) # humans not 0-based
+ if not expanded_line:
+ expanded_line = NormalLine(line.rstrip())
+ self.expanded_lines.append(expanded_line)
+
+ def gen(self, context = None):
+ """Generate the code that generates the BSP."""
+ base_indent = 0
+
+ indent = new_indent = base_indent
+
+ for line in self.expanded_lines:
+ genline = line.gen(context)
+ if not genline:
+ continue
+ if isinstance(line, InputLine):
+ line.generated_line = genline
+ continue
+ if genline.startswith(OPEN_START):
+ if indent == 1:
+ base_indent = 1
+ if indent:
+ if genline == BLANKLINE_STR or (not genline.startswith(NORMAL_START)
+ and not genline.startswith(OPEN_START)):
+ indent = new_indent = base_indent
+ if genline.endswith(":"):
+ new_indent = base_indent + 1
+ line.generated_line = (indent * INDENT_STR) + genline
+ indent = new_indent
+
+
+class SubstrateDir(SubstrateBase):
+ """
+ Container for both expanded and unexpanded substrate dirs.
+ """
+ def __init__(self, filename, filebase, out_filebase):
+ SubstrateBase.__init__(self, filename, filebase, out_filebase)
+
+ def expand(self):
+ SubstrateBase.expand(self)
+
+ def gen(self, context = None):
+ """Generate the code that generates the BSP."""
+ indent = new_indent = 0
+ for line in self.expanded_lines:
+ genline = line.gen(context)
+ if not genline:
+ continue
+ if genline.endswith(":"):
+ new_indent = 1
+ else:
+ new_indent = 0
+ line.generated_line = (indent * INDENT_STR) + genline
+ indent = new_indent
+
+
+def expand_target(target, all_files, out_filebase):
+ """
+ Expand the contents of all template tags in the target. This
+ means removing tags and categorizing or creating lines so that
+ future passes can process and present input lines and generate the
+ corresponding lines of the Python program that will be exec'ed to
+ actually produce the final BSP. 'all_files' includes directories.
+ """
+ for root, dirs, files in os.walk(target):
+ for file in files:
+ if file.endswith("~") or file.endswith("#"):
+ continue
+ f = os.path.join(root, file)
+ sfile = SubstrateFile(f, target, out_filebase)
+ sfile.expand()
+ all_files.append(sfile)
+
+ for dir in dirs:
+ d = os.path.join(root, dir)
+ sdir = SubstrateDir(d, target, out_filebase)
+ sdir.expand()
+ all_files.append(sdir)
+
+
+def gen_program_machine_lines(machine, program_lines):
+ """
+ Use the input values we got from the command line.
+ """
+ line = "machine = \"" + machine + "\""
+ program_lines.append(line)
+
+ line = "layer_name = \"" + machine + "\""
+ program_lines.append(line)
+
+
+def sort_inputlines(input_lines):
+ """Sort input lines according to priority (position)."""
+ input_lines.sort(key = lambda l: l.prio)
+
+
+def find_parent_dependency(lines, depends_on):
+ for i, line in enumerate(lines):
+ if isinstance(line, CodeLine):
+ continue
+ if line.props["name"] == depends_on:
+ return i
+
+ return -1
+
+
+def process_inputline_dependencies(input_lines, all_inputlines):
+ """If any input lines depend on others, put the others first."""
+ for line in input_lines:
+ if isinstance(line, InputLineGroup):
+ group_inputlines = []
+ process_inputline_dependencies(line.group, group_inputlines)
+ line.group = group_inputlines
+ all_inputlines.append(line)
+ continue
+
+ if isinstance(line, CodeLine) or isinstance(line, NormalLine):
+ all_inputlines.append(line)
+ continue
+
+ try:
+ depends_on = line.props["depends-on"]
+ depends_codeline = "if " + line.props["depends-on"] + " == \"" + line.props["depends-on-val"] + "\":"
+ all_inputlines.append(CodeLine(depends_codeline))
+ all_inputlines.append(line)
+ except KeyError:
+ all_inputlines.append(line)
+
+
+def conditional_filename(filename):
+ """
+ Check if the filename itself contains a conditional statement. If
+ so, return a codeline for it.
+ """
+ opentag_start = filename.find(OPEN_TAG)
+
+ if opentag_start != -1:
+ if filename[opentag_start:].startswith(ASSIGN_TAG):
+ return None
+ end = filename.find(CLOSE_TAG, opentag_start)
+ if end == -1:
+ print "No close tag found for open tag in filename %s" % filename
+ sys.exit(1)
+
+ # we have a {{ tag i.e. code
+ tag = filename[opentag_start + len(OPEN_TAG):end].strip()
+ if not tag.lstrip().startswith(IF_TAG):
+ print "Only 'if' tags are allowed in file or directory names, filename: %s" % filename
+ sys.exit(1)
+
+ return CodeLine(tag)
+
+ return None
+
+
+class InputLineGroup(InputLine):
+ """
+ InputLine that does nothing but group other input lines
+ corresponding to all the input lines in a SubstrateFile so they
+ can be generated as a group. prio is the only property used.
+ """
+ def __init__(self, codeline):
+ InputLine.__init__(self, {}, "", 0)
+ self.group = []
+ self.prio = sys.maxint
+ self.group.append(codeline)
+
+ def append(self, line):
+ self.group.append(line)
+ if line.prio < self.prio:
+ self.prio = line.prio
+
+ def len(self):
+ return len(self.group)
+
+
+def gather_inputlines(files):
+ """
+ Gather all the InputLines - we want to generate them first.
+ """
+ all_inputlines = []
+ input_lines = []
+
+ for file in files:
+ if isinstance(file, SubstrateFile):
+ group = None
+ basename = os.path.basename(file.translated_filename)
+
+ codeline = conditional_filename(basename)
+ if codeline:
+ group = InputLineGroup(codeline)
+
+ have_condition = False
+ condition_to_write = None
+ for line in file.expanded_lines:
+ if isinstance(line, CodeLine):
+ have_condition = True
+ condition_to_write = line
+ continue
+ if isinstance(line, InputLine):
+ if group:
+ if condition_to_write:
+ condition_to_write.prio = line.prio
+ condition_to_write.discard = True
+ group.append(condition_to_write)
+ condition_to_write = None
+ group.append(line)
+ else:
+ if condition_to_write:
+ condition_to_write.prio = line.prio
+ condition_to_write.discard = True
+ input_lines.append(condition_to_write)
+ condition_to_write = None
+ input_lines.append(line)
+ else:
+ if condition_to_write:
+ condition_to_write = None
+ if have_condition:
+ if not line.line.strip():
+ line.discard = True
+ input_lines.append(line)
+ have_condition = False
+
+ if group and group.len() > 1:
+ input_lines.append(group)
+
+ sort_inputlines(input_lines)
+ process_inputline_dependencies(input_lines, all_inputlines)
+
+ return all_inputlines
+
+
+def run_program_lines(linelist, codedump):
+ """
+ For a single file, print all the python code into a buf and execute it.
+ """
+ buf = "\n".join(linelist)
+
+ if codedump:
+ of = open("bspgen.out", "w")
+ of.write(buf)
+ of.close()
+ exec buf
+
+
+def gen_target(files, context = None):
+ """
+ Generate the python code for each file.
+ """
+ for file in files:
+ file.gen(context)
+
+
+def gen_program_header_lines(program_lines):
+ """
+ Generate any header lines needed by the generated program.
+ """
+ program_lines.append("current_file = \"\"")
+
+
+def gen_supplied_property_vals(properties, program_lines):
+ """
+ Generate user-specified entries for input values instead of
+ generating input prompts.
+ """
+ for name, val in properties.iteritems():
+ program_line = name + " = \"" + val + "\""
+ program_lines.append(program_line)
+
+
+def gen_initial_property_vals(input_lines, program_lines):
+ """
+ Generate null or default entries for input values, so we don't
+ have undefined variables.
+ """
+ for line in input_lines:
+ if isinstance(line, InputLineGroup):
+ gen_initial_property_vals(line.group, program_lines)
+ continue
+
+ if isinstance(line, InputLine):
+ try:
+ name = line.props["name"]
+ try:
+ default_val = "\"" + line.props["default"] + "\""
+ except:
+ default_val = "\"\""
+ program_line = name + " = " + default_val
+ program_lines.append(program_line)
+ except KeyError:
+ pass
+
+
+def gen_program_input_lines(input_lines, program_lines, context, in_group = False):
+ """
+ Generate only the input lines used for prompting the user. For
+ that, we only have input lines and CodeLines that affect the next
+ input line.
+ """
+ indent = new_indent = 0
+
+ for line in input_lines:
+ if isinstance(line, InputLineGroup):
+ gen_program_input_lines(line.group, program_lines, context, True)
+ continue
+ if not line.line.strip():
+ continue
+
+ genline = line.gen(context)
+ if not genline:
+ continue
+ if genline.endswith(":"):
+ new_indent += 1
+ else:
+ if indent > 1 or (not in_group and indent):
+ new_indent -= 1
+
+ line.generated_line = (indent * INDENT_STR) + genline
+ program_lines.append(line.generated_line)
+
+ indent = new_indent
+
+
+def gen_program_lines(target_files, program_lines):
+ """
+ Generate the program lines that make up the BSP generation
+ program. This appends the generated lines of all target_files to
+ program_lines, and skips input lines, which are dealt with
+ separately, or omitted.
+ """
+ for file in target_files:
+ if file.filename.endswith("noinstall"):
+ continue
+
+ for line in file.expanded_lines:
+ if isinstance(line, InputLine):
+ continue
+ if line.discard:
+ continue
+
+ program_lines.append(line.generated_line)
+
+
+def create_context(machine, arch, scripts_path):
+ """
+ Create a context object for use in deferred function invocation.
+ """
+ context = {}
+
+ context["machine"] = machine
+ context["arch"] = arch
+ context["scripts_path"] = scripts_path
+
+ return context
+
+
+def capture_context(context):
+ """
+ Capture and return a copy of the given context object for use in
+ deferred function invocation.
+ """
+ captured_context = {}
+
+ captured_context["machine"] = context["machine"]
+ captured_context["arch"] = context["arch"]
+ captured_context["scripts_path"] = context["scripts_path"]
+
+ return captured_context
+
+
+def expand_targets(context, bsp_output_dir, expand_common=True):
+ """
+ Expand all the tags in both the common and machine-specific
+ 'targets'.
+
+ If expand_common is False, don't expand the common target (this
+ option is used to create special-purpose layers).
+ """
+ target_files = []
+
+ machine = context["machine"]
+ arch = context["arch"]
+ scripts_path = context["scripts_path"]
+
+ lib_path = scripts_path + '/lib'
+ bsp_path = lib_path + '/bsp'
+ arch_path = bsp_path + '/substrate/target/arch'
+
+ if expand_common:
+ common = os.path.join(arch_path, "common")
+ expand_target(common, target_files, bsp_output_dir)
+
+ arches = os.listdir(arch_path)
+ if arch not in arches or arch == "common":
+ print "Invalid karch, exiting\n"
+ sys.exit(1)
+
+ target = os.path.join(arch_path, arch)
+ expand_target(target, target_files, bsp_output_dir)
+
+ gen_target(target_files, context)
+
+ return target_files
+
+
+def yocto_common_create(machine, target, scripts_path, layer_output_dir, codedump, properties_file, properties_str="", expand_common=True):
+ """
+ Common layer-creation code
+
+ machine - user-defined machine name (if needed, will generate 'machine' var)
+ target - the 'target' the layer will be based on, must be one in
+ scripts/lib/bsp/substrate/target/arch
+ scripts_path - absolute path to yocto /scripts dir
+ layer_output_dir - dirname to create for layer
+ codedump - dump generated code to bspgen.out
+ properties_file - use values from this file if nonempty, i.e. no prompting
+ properties_str - use values from this string if nonempty, i.e. no prompting
+ expand_common - boolean, use the contents of (for bsp layers) arch/common
+ """
+ if os.path.exists(layer_output_dir):
+ print "\nlayer output dir already exists, exiting. (%s)" % layer_output_dir
+ sys.exit(1)
+
+ properties = None
+
+ if properties_file:
+ try:
+ infile = open(properties_file, "r")
+ except IOError:
+ print "Couldn't open properties file %s for reading, exiting" % properties_file
+ sys.exit(1)
+
+ properties = json.load(infile)
+
+ if properties_str and not properties:
+ properties = json.loads(properties_str)
+
+ os.mkdir(layer_output_dir)
+
+ context = create_context(machine, target, scripts_path)
+ target_files = expand_targets(context, layer_output_dir, expand_common)
+
+ input_lines = gather_inputlines(target_files)
+
+ program_lines = []
+
+ gen_program_header_lines(program_lines)
+
+ gen_initial_property_vals(input_lines, program_lines)
+
+ if properties:
+ gen_supplied_property_vals(properties, program_lines)
+
+ gen_program_machine_lines(machine, program_lines)
+
+ if not properties:
+ gen_program_input_lines(input_lines, program_lines, context)
+
+ gen_program_lines(target_files, program_lines)
+
+ run_program_lines(program_lines, codedump)
+
+
+def yocto_layer_create(layer_name, scripts_path, layer_output_dir, codedump, properties_file, properties=""):
+ """
+ Create yocto layer
+
+ layer_name - user-defined layer name
+ scripts_path - absolute path to yocto /scripts dir
+ layer_output_dir - dirname to create for layer
+ codedump - dump generated code to bspgen.out
+ properties_file - use values from this file if nonempty, i.e. no prompting
+ properties - use values from this string if nonempty, i.e. no prompting
+ """
+ yocto_common_create(layer_name, "layer", scripts_path, layer_output_dir, codedump, properties_file, properties, False)
+
+ print "\nNew layer created in %s.\n" % (layer_output_dir)
+ print "Don't forget to add it to your BBLAYERS (for details see %s/README)." % (layer_output_dir)
+
+
+def yocto_bsp_create(machine, arch, scripts_path, bsp_output_dir, codedump, properties_file, properties=None):
+ """
+ Create bsp
+
+ machine - user-defined machine name
+ arch - the arch the bsp will be based on, must be one in
+ scripts/lib/bsp/substrate/target/arch
+ scripts_path - absolute path to yocto /scripts dir
+ bsp_output_dir - dirname to create for BSP
+ codedump - dump generated code to bspgen.out
+ properties_file - use values from this file if nonempty, i.e. no prompting
+ properties - use values from this string if nonempty, i.e. no prompting
+ """
+ yocto_common_create(machine, arch, scripts_path, bsp_output_dir, codedump, properties_file, properties)
+
+ print "\nNew %s BSP created in %s" % (arch, bsp_output_dir)
+
+
+def print_dict(items, indent = 0):
+ """
+ Print the values in a possibly nested dictionary.
+ """
+ for key, val in items.iteritems():
+ print " "*indent + "\"%s\" :" % key,
+ if type(val) == dict:
+ print "{"
+ print_dict(val, indent + 1)
+ print " "*indent + "}"
+ else:
+ print "%s" % val
+
+
+def get_properties(input_lines):
+ """
+ Get the complete set of properties for all the input items in the
+ BSP, as a possibly nested dictionary.
+ """
+ properties = {}
+
+ for line in input_lines:
+ if isinstance(line, InputLineGroup):
+ statement = line.group[0].line
+ group_properties = get_properties(line.group)
+ properties[statement] = group_properties
+ continue
+
+ if not isinstance(line, InputLine):
+ continue
+
+ if isinstance(line, ChoiceInputLine):
+ continue
+
+ props = line.props
+ item = {}
+ name = props["name"]
+ for key, val in props.items():
+ if not key == "name":
+ item[key] = val
+ properties[name] = item
+
+ return properties
+
+
+def yocto_layer_list_properties(arch, scripts_path, properties_file, expand_common=True):
+ """
+ List the complete set of properties for all the input items in the
+ layer. If properties_file is non-null, write the complete set of
+ properties as a nested JSON object corresponding to a possibly
+ nested dictionary.
+ """
+ context = create_context("unused", arch, scripts_path)
+ target_files = expand_targets(context, "unused", expand_common)
+
+ input_lines = gather_inputlines(target_files)
+
+ properties = get_properties(input_lines)
+ if properties_file:
+ try:
+ of = open(properties_file, "w")
+ except IOError:
+ print "Couldn't open properties file %s for writing, exiting" % properties_file
+ sys.exit(1)
+
+ json.dump(properties, of, indent=1)
+ else:
+ print_dict(properties)
+
+
+def split_nested_property(property):
+ """
+ A property name of the form x.y describes a nested property
+ i.e. the property y is contained within x and can be addressed
+ using standard JSON syntax for nested properties. Note that if a
+ property name itself contains '.', it should be contained in
+ double quotes.
+ """
+ splittable_property = ""
+ in_quotes = False
+ for c in property:
+ if c == '.' and not in_quotes:
+ splittable_property += '\n'
+ continue
+ if c == '"':
+ in_quotes = not in_quotes
+ splittable_property += c
+
+ split_properties = splittable_property.split('\n')
+
+ if len(split_properties) > 1:
+ return split_properties
+
+ return None
+
+
+def find_input_line_group(substring, input_lines):
+ """
+ Find and return the InputLineGroup containing the specified substring.
+ """
+ for line in input_lines:
+ if isinstance(line, InputLineGroup):
+ if substring in line.group[0].line:
+ return line
+
+ return None
+
+
+def find_input_line(name, input_lines):
+ """
+ Find the input line with the specified name.
+ """
+ for line in input_lines:
+ if isinstance(line, InputLineGroup):
+ l = find_input_line(name, line.group)
+ if l:
+ return l
+
+ if isinstance(line, InputLine):
+ try:
+ if line.props["name"] == name:
+ return line
+ if line.props["name"] + "_" + line.props["nameappend"] == name:
+ return line
+ except KeyError:
+ pass
+
+ return None
+
+
+def print_values(type, values_list):
+ """
+ Print the values in the given list of values.
+ """
+ if type == "choicelist":
+ for value in values_list:
+ print "[\"%s\", \"%s\"]" % (value[0], value[1])
+ elif type == "boolean":
+ for value in values_list:
+ print "[\"%s\", \"%s\"]" % (value[0], value[1])
+
+
+def yocto_layer_list_property_values(arch, property, scripts_path, properties_file, expand_common=True):
+ """
+ List the possible values for a given input property. If
+ properties_file is non-null, write the complete set of properties
+ as a JSON object corresponding to an array of possible values.
+ """
+ context = create_context("unused", arch, scripts_path)
+ context["name"] = property
+
+ target_files = expand_targets(context, "unused", expand_common)
+
+ input_lines = gather_inputlines(target_files)
+
+ properties = get_properties(input_lines)
+
+ nested_properties = split_nested_property(property)
+ if nested_properties:
+ # currently the outer property of a nested property always
+ # corresponds to an input line group
+ input_line_group = find_input_line_group(nested_properties[0], input_lines)
+ if input_line_group:
+ input_lines[:] = input_line_group.group[1:]
+ # The inner property of a nested property name is the
+ # actual property name we want, so reset to that
+ property = nested_properties[1]
+
+ input_line = find_input_line(property, input_lines)
+ if not input_line:
+ print "Couldn't find values for property %s" % property
+ return
+
+ values_list = []
+
+ type = input_line.props["type"]
+ if type == "boolean":
+ values_list.append(["y", "n"])
+ elif type == "choicelist" or type == "checklist":
+ try:
+ gen_fn = input_line.props["gen"]
+ if nested_properties:
+ context["filename"] = nested_properties[0]
+ try:
+ context["branches_base"] = input_line.props["branches_base"]
+ except KeyError:
+ context["branches_base"] = None
+ values_list = input_line.gen_choices_list(context, False)
+ except KeyError:
+ for choice in input_line.choices:
+ choicepair = []
+ choicepair.append(choice.val)
+ choicepair.append(choice.desc)
+ values_list.append(choicepair)
+
+ if properties_file:
+ try:
+ of = open(properties_file, "w")
+ except IOError:
+ print "Couldn't open properties file %s for writing, exiting" % properties_file
+ sys.exit(1)
+
+ json.dump(values_list, of)
+
+ print_values(type, values_list)
+
+
+def yocto_bsp_list(args, scripts_path, properties_file):
+ """
+ Print available architectures, or the complete list of properties
+ defined by the BSP, or the possible values for a particular BSP
+ property.
+ """
+ if len(args) < 1:
+ return False
+
+ if args[0] == "karch":
+ lib_path = scripts_path + '/lib'
+ bsp_path = lib_path + '/bsp'
+ arch_path = bsp_path + '/substrate/target/arch'
+ print "Architectures available:"
+ for arch in os.listdir(arch_path):
+ if arch == "common" or arch == "layer":
+ continue
+ print " %s" % arch
+ return True
+ else:
+ arch = args[0]
+
+ if len(args) < 2 or len(args) > 3:
+ return False
+
+ if len(args) == 2:
+ if args[1] == "properties":
+ yocto_layer_list_properties(arch, scripts_path, properties_file)
+ else:
+ return False
+
+ if len(args) == 3:
+ if args[1] == "property":
+ yocto_layer_list_property_values(arch, args[2], scripts_path, properties_file)
+ else:
+ return False
+
+ return True
+
+
+def yocto_layer_list(args, scripts_path, properties_file):
+ """
+ Print the complete list of input properties defined by the layer,
+ or the possible values for a particular layer property.
+ """
+ if len(args) < 1:
+ return False
+
+ if len(args) < 1 or len(args) > 2:
+ return False
+
+ if len(args) == 1:
+ if args[0] == "properties":
+ yocto_layer_list_properties("layer", scripts_path, properties_file, False)
+ else:
+ return False
+
+ if len(args) == 2:
+ if args[0] == "property":
+ yocto_layer_list_property_values("layer", args[1], scripts_path, properties_file, False)
+ else:
+ return False
+
+ return True
+
+
+def map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
+ """
+ Return the linux-yocto bsp branch to use with the specified
+ kbranch. This handles the -standard variants for 3.4 and 3.8; the
+ other variants don't need mappings.
+ """
+ if need_new_kbranch == "y":
+ kbranch = new_kbranch
+ else:
+ kbranch = existing_kbranch
+
+ if kbranch.startswith("standard/common-pc-64"):
+ return "bsp/common-pc-64/common-pc-64-standard.scc"
+ if kbranch.startswith("standard/common-pc"):
+ return "bsp/common-pc/common-pc-standard.scc"
+ else:
+ return "ktypes/standard/standard.scc"
+
+
+def map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
+ """
+ Return the linux-yocto bsp branch to use with the specified
+ kbranch. This handles the -preempt-rt variants for 3.4 and 3.8;
+ the other variants don't need mappings.
+ """
+ if need_new_kbranch == "y":
+ kbranch = new_kbranch
+ else:
+ kbranch = existing_kbranch
+
+ if kbranch.startswith("standard/preempt-rt/common-pc-64"):
+ return "bsp/common-pc-64/common-pc-64-preempt-rt.scc"
+ if kbranch.startswith("standard/preempt-rt/common-pc"):
+ return "bsp/common-pc/common-pc-preempt-rt.scc"
+ else:
+ return "ktypes/preempt-rt/preempt-rt.scc"
+
+
+def map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
+ """
+ Return the linux-yocto bsp branch to use with the specified
+ kbranch. This handles the -tiny variants for 3.4 and 3.8; the
+ other variants don't need mappings.
+ """
+ if need_new_kbranch == "y":
+ kbranch = new_kbranch
+ else:
+ kbranch = existing_kbranch
+
+ if kbranch.startswith("standard/tiny/common-pc"):
+ return "bsp/common-pc/common-pc-tiny.scc"
+ else:
+ return "ktypes/tiny/tiny.scc"
diff --git a/scripts/lib/bsp/help.py b/scripts/lib/bsp/help.py
new file mode 100644
index 0000000..85a09dd
--- /dev/null
+++ b/scripts/lib/bsp/help.py
@@ -0,0 +1,1046 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2012, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module implements some basic help invocation functions along
+# with the bulk of the help topic text for the Yocto BSP Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] intel.com>
+#
+
+import subprocess
+import logging
+
+
+def subcommand_error(args):
+ logging.info("invalid subcommand %s" % args[0])
+
+
+def display_help(subcommand, subcommands):
+ """
+ Display help for subcommand.
+ """
+ if subcommand not in subcommands:
+ return False
+
+ help = subcommands.get(subcommand, subcommand_error)[2]
+ pager = subprocess.Popen('less', stdin=subprocess.PIPE)
+ pager.communicate(help)
+
+ return True
+
+
+def yocto_help(args, usage_str, subcommands):
+ """
+ Subcommand help dispatcher.
+ """
+ if len(args) == 1 or not display_help(args[1], subcommands):
+ print(usage_str)
+
+
+def invoke_subcommand(args, parser, main_command_usage, subcommands):
+ """
+ Dispatch to subcommand handler borrowed from combo-layer.
+ Should use argparse, but has to work in 2.6.
+ """
+ if not args:
+ logging.error("No subcommand specified, exiting")
+ parser.print_help()
+ elif args[0] == "help":
+ yocto_help(args, main_command_usage, subcommands)
+ elif args[0] not in subcommands:
+ logging.error("Unsupported subcommand %s, exiting\n" % (args[0]))
+ parser.print_help()
+ else:
+ usage = subcommands.get(args[0], subcommand_error)[1]
+ subcommands.get(args[0], subcommand_error)[0](args[1:], usage)
+
+
+##
+# yocto-bsp help and usage strings
+##
+
+yocto_bsp_usage = """
+
+ Create a customized Yocto BSP layer.
+
+ usage: yocto-bsp [--version] [--help] COMMAND [ARGS]
+
+ Current 'yocto-bsp' commands are:
+ create Create a new Yocto BSP
+ list List available values for options and BSP properties
+
+ See 'yocto-bsp help COMMAND' for more information on a specific command.
+"""
+
+yocto_bsp_help_usage = """
+
+ usage: yocto-bsp help <subcommand>
+
+ This command displays detailed help for the specified subcommand.
+"""
+
+yocto_bsp_create_usage = """
+
+ Create a new Yocto BSP
+
+ usage: yocto-bsp create <bsp-name> <karch> [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+ [-c | --codedump] [-s | --skip-git-check]
+
+ This command creates a Yocto BSP based on the specified parameters.
+ The new BSP will be a new Yocto BSP layer contained by default within
+ the top-level directory specified as 'meta-bsp-name'. The -o option
+ can be used to place the BSP layer in a directory with a different
+ name and location.
+
+ The value of the 'karch' parameter determines the set of files that
+ will be generated for the BSP, along with the specific set of
+ 'properties' that will be used to fill out the BSP-specific portions
+ of the BSP. The possible values for the 'karch' parameter can be
+ listed via 'yocto-bsp list karch'.
+
+ NOTE: Once created, you should add your new layer to your
+ bblayers.conf file in order for it to be subsequently seen and
+ modified by the yocto-kernel tool.
+
+ See 'yocto-bsp help create' for more detailed instructions.
+"""
+
+yocto_bsp_create_help = """
+
+NAME
+ yocto-bsp create - Create a new Yocto BSP
+
+SYNOPSIS
+ yocto-bsp create <bsp-name> <karch> [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+ [-c | --codedump] [-s | --skip-git-check]
+
+DESCRIPTION
+ This command creates a Yocto BSP based on the specified
+ parameters. The new BSP will be a new Yocto BSP layer contained
+ by default within the top-level directory specified as
+ 'meta-bsp-name'. The -o option can be used to place the BSP layer
+ in a directory with a different name and location.
+
+ The value of the 'karch' parameter determines the set of files
+ that will be generated for the BSP, along with the specific set of
+ 'properties' that will be used to fill out the BSP-specific
+ portions of the BSP. The possible values for the 'karch' parameter
+ can be listed via 'yocto-bsp list karch'.
+
+ The BSP-specific properties that define the values that will be
+ used to generate a particular BSP can be specified on the
+ command-line using the -i option and supplying a JSON object
+ consisting of the set of name:value pairs needed by the BSP.
+
+ If the -i option is not used, the user will be interactively
+ prompted for each of the required property values, which will then
+ be used as values for BSP generation.
+
+ The set of properties available for a given architecture can be
+ listed using the 'yocto-bsp list' command.
+
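+ For example (illustrative values; the property names come from the
+ 'yocto-bsp list <karch> properties' output), a JSON property file
+ passed via -i might contain a flat object of name:value pairs such
+ as:
+
+ { "touchscreen" : "y", "xserver_choice" : "xserver_i915" }
+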
+ Specifying -c causes the Python code generated and executed to
+ create the BSP to be dumped to the 'bspgen.out' file in the
+ current directory, and is useful for debugging.
+
+ NOTE: Once created, you should add your new layer to your
+ bblayers.conf file in order for it to be subsequently seen and
+ modified by the yocto-kernel tool.
+
+ For example, assuming your poky repo is at /path/to/poky, your new
+ BSP layer is at /path/to/poky/meta-mybsp, and your build directory
+ is /path/to/build:
+
+ $ gedit /path/to/build/conf/bblayers.conf
+
+ BBLAYERS ?= " \\
+ /path/to/poky/meta \\
+ /path/to/poky/meta-poky \\
+ /path/to/poky/meta-mybsp \\
+ "
+"""
+
+yocto_bsp_list_usage = """
+
+ usage: yocto-bsp list karch
+ yocto-bsp list <karch> properties
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+ yocto-bsp list <karch> property <xxx>
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+
+ This command enumerates the complete set of possible values for a
+ specified option or property needed by the BSP creation process.
+
+ The first form enumerates all the possible values that exist and can
+ be specified for the 'karch' parameter to the 'yocto-bsp create'
+ command.
+
+ The second form enumerates all the possible properties that exist and
+ must have values specified for them in the 'yocto-bsp create' command
+ for the given 'karch'.
+
+ The third form enumerates all the possible values that exist and can
+ be specified for any of the enumerable properties of the given
+ 'karch' in the 'yocto-bsp create' command.
+
+ See 'yocto-bsp help list' for more details.
+"""
+
+yocto_bsp_list_help = """
+
+NAME
+ yocto-bsp list - List available values for options and BSP properties
+
+SYNOPSIS
+ yocto-bsp list karch
+ yocto-bsp list <karch> properties
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+ yocto-bsp list <karch> property <xxx>
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+
+DESCRIPTION
+ This command enumerates the complete set of possible values for a
+ specified option or property needed by the BSP creation process.
+
+ The first form enumerates all the possible values that exist and
+ can be specified for the 'karch' parameter to the 'yocto-bsp create'
+ command. Example output for the 'list karch' command:
+
+ $ yocto-bsp list karch
+ Architectures available:
+ arm
+ powerpc
+ i386
+ mips
+ mips64
+ x86_64
+ qemu
+
+ The second form enumerates all the possible properties that exist
+ and must have values specified for them in the 'yocto-bsp create'
+ command for the given 'karch'. This command is mainly meant to
+ aid the development of user interface alternatives to the default
+ text-based prompting interface. If the -o option is specified,
+ the list of properties, in addition to being displayed, will be
+ written to the specified file as a JSON object. In this case, the
+ object will consist of the set of name:value pairs corresponding
+ to the (possibly nested) dictionary of properties defined by the
+ input statements used by the BSP. Some example output for the
+ 'list properties' command:
+
+ $ yocto-bsp list arm properties
+ "touchscreen" : {
+ "msg" : Does your BSP have a touchscreen? (y/N)
+ "default" : n
+ "type" : boolean
+ }
+ "uboot_loadaddress" : {
+ "msg" : Please specify a value for UBOOT_LOADADDRESS.
+ "default" : 0x80008000
+ "type" : edit
+ "prio" : 40
+ }
+ "kernel_choice" : {
+ "prio" : 10
+ "default" : linux-yocto_3.2
+ "depends-on" : use_default_kernel
+ "depends-on-val" : n
+ "msg" : Please choose the kernel to use in this BSP =>
+ "type" : choicelist
+ "gen" : bsp.kernel.kernels
+ }
+ "if kernel_choice == "linux-yocto_3.0":" : {
+ "base_kbranch_linux_yocto_3_0" : {
+ "prio" : 20
+ "default" : yocto/standard
+ "depends-on" : new_kbranch_linux_yocto_3_0
+ "depends-on-val" : y
+ "msg" : Please choose a machine branch to base this BSP on =>
+ "type" : choicelist
+ "gen" : bsp.kernel.all_branches
+ }
+ .
+ .
+ .
+
+ Each entry in the output consists of the name of the input element
+ e.g. "touchscreen", followed by the properties defined for that
+ element enclosed in braces. This information should be sufficient
+ to create a complete user interface.
+ Two features of the scheme provide for conditional input. First,
+ if a Python "if" statement appears in place of an input element
+ name, the set of enclosed input elements apply and should be
+ presented to the user only if the 'if' statement evaluates to
+ true. The test in the if statement will always reference another
+ input element in the list, which means that the element being
+ tested should be presented to the user before the elements
+ enclosed by the if block. Secondly, in a similar way, some
+ elements contain "depends-on" and depends-on-val" tags, which mean
+ that the affected input element should only be presented to the
+ user if the element it depends on has already been presented to
+ the user and the user has selected the specified value for that
+ element.
+
+ The third form enumerates all the possible values that exist and
+ can be specified for any of the enumerable properties of the given
+ 'karch' in the 'yocto-bsp create' command. If the -o option is
+ specified, the list of values for the given property, in addition
+ to being displayed, will be written to the specified file as a
+ JSON object. In this case, the object will consist of the set of
+ name:value pairs corresponding to the array of property values
+ associated with the property.
+
+ $ yocto-bsp list i386 property xserver_choice
+ ["xserver_vesa", "VESA xserver support"]
+ ["xserver_i915", "i915 xserver support"]
+
+ $ yocto-bsp list arm property base_kbranch_linux_yocto_3_0
+ Getting branches from remote repo git://git.yoctoproject.org/linux-yocto-3.0...
+ ["yocto/base", "yocto/base"]
+ ["yocto/eg20t", "yocto/eg20t"]
+ ["yocto/gma500", "yocto/gma500"]
+ ["yocto/pvr", "yocto/pvr"]
+ ["yocto/standard/arm-versatile-926ejs", "yocto/standard/arm-versatile-926ejs"]
+ ["yocto/standard/base", "yocto/standard/base"]
+ ["yocto/standard/cedartrail", "yocto/standard/cedartrail"]
+ .
+ .
+ .
+ ["yocto/standard/qemu-ppc32", "yocto/standard/qemu-ppc32"]
+ ["yocto/standard/routerstationpro", "yocto/standard/routerstationpro"]
+
+ The third form as well is meant mainly for developers of
+ alternative interfaces - it allows the developer to fetch the
+ possible values for a given input element on-demand. This
+ on-demand capability is especially valuable for elements that
+ require relatively expensive remote operations to fulfill, such as
+ the example that returns the set of branches available in a remote
+ git tree above.
+
+"""
+
+##
+# yocto-kernel help and usage strings
+##
+
+yocto_kernel_usage = """
+
+ Modify and list Yocto BSP kernel config items and patches.
+
+ usage: yocto-kernel [--version] [--help] COMMAND [ARGS]
+
+ Current 'yocto-kernel' commands are:
+ config list List the modifiable set of bare kernel config options for a BSP
+ config add Add or modify bare kernel config options for a BSP
+ config rm Remove bare kernel config options from a BSP
+ patch list List the patches associated with a BSP
+ patch add Patch the Yocto kernel for a BSP
+ patch rm Remove patches from a BSP
+ feature list List the features used by a BSP
+ feature add Have a BSP use a feature
+ feature rm Have a BSP stop using a feature
+ features list List the features available to BSPs
+ feature describe Describe a particular feature
+ feature create Create a new BSP-local feature
+ feature destroy Remove a BSP-local feature
+
+ See 'yocto-kernel help COMMAND' for more information on a specific command.
+
+"""
+
+
+yocto_kernel_help_usage = """
+
+ usage: yocto-kernel help <subcommand>
+
+ This command displays detailed help for the specified subcommand.
+"""
+
+yocto_kernel_config_list_usage = """
+
+ List the modifiable set of bare kernel config options for a BSP
+
+ usage: yocto-kernel config list <bsp-name>
+
+ This command lists the 'modifiable' config items for a BSP i.e. the
+ items which are eligible for modification or removal by other
+ yocto-kernel commands.
+
+ 'modifiable' config items are the config items contained in a BSP's
+ user-config.cfg base config.
+"""
+
+
+yocto_kernel_config_list_help = """
+
+NAME
+ yocto-kernel config list - List the modifiable set of bare kernel
+ config options for a BSP
+
+SYNOPSIS
+ yocto-kernel config list <bsp-name>
+
+DESCRIPTION
+ This command lists the 'modifiable' config items for a BSP
+ i.e. the items which are eligible for modification or removal by
+ other yocto-kernel commands.
+"""
+
+
+yocto_kernel_config_add_usage = """
+
+ Add or modify bare kernel config options for a BSP
+
+ usage: yocto-kernel config add <bsp-name> [<CONFIG_XXX=x> ...]
+
+ This command adds one or more CONFIG_XXX=x items to a BSP's user-config.cfg
+ base config.
+"""
+
+
+yocto_kernel_config_add_help = """
+
+NAME
+ yocto-kernel config add - Add or modify bare kernel config options
+ for a BSP
+
+SYNOPSIS
+ yocto-kernel config add <bsp-name> [<CONFIG_XXX=x> ...]
+
+DESCRIPTION
+ This command adds one or more CONFIG_XXX=x items to a BSP's
+ user-config.cfg base config.
+
+ NOTE: It's up to the user to determine whether or not the config
+ options being added make sense - this command does no sanity
+ checking or verification of any kind to ensure that a config
+ option really makes sense and will actually be set in
+ the final config. For example, if a config option depends on
+ other config options, it will be turned off by kconfig if the
+ other options aren't set correctly.
+"""
+
+
+yocto_kernel_config_rm_usage = """
+
+ Remove bare kernel config options from a BSP
+
+ usage: yocto-kernel config rm <bsp-name>
+
+ This command removes (turns off) one or more CONFIG_XXX items from a
+ BSP's user-config.cfg base config.
+
+ The set of config items available to be removed by this command for a
+ BSP is listed and the user prompted for the specific items to remove.
+"""
+
+
+yocto_kernel_config_rm_help = """
+
+NAME
+ yocto-kernel config rm - Remove bare kernel config options from a
+ BSP
+
+SYNOPSIS
+ yocto-kernel config rm <bsp-name>
+
+DESCRIPTION
+ This command removes (turns off) one or more CONFIG_XXX items from a
+ BSP's user-config.cfg base config.
+
+ The set of config items available to be removed by this command
+ for a BSP is listed and the user prompted for the specific items
+ to remove.
+"""
+
+
+yocto_kernel_patch_list_usage = """
+
+ List the patches associated with the kernel for a BSP
+
+ usage: yocto-kernel patch list <bsp-name>
+
+ This command lists the patches associated with a BSP.
+
+ NOTE: this only applies to patches listed in the kernel recipe's
+ user-patches.scc file (and currently repeated in its SRC_URI).
+"""
+
+
+yocto_kernel_patch_list_help = """
+
+NAME
+ yocto-kernel patch list - List the patches associated with the kernel
+ for a BSP
+
+SYNOPSIS
+ yocto-kernel patch list <bsp-name>
+
+DESCRIPTION
+ This command lists the patches associated with a BSP.
+
+ NOTE: this only applies to patches listed in the kernel recipe's
+ user-patches.scc file (and currently repeated in its SRC_URI).
+"""
+
+
+yocto_kernel_patch_add_usage = """
+
+ Patch the Yocto kernel for a specific BSP
+
+ usage: yocto-kernel patch add <bsp-name> [<PATCH> ...]
+
+ This command adds one or more patches to a BSP's machine branch. The
+ patch will be added to the BSP's linux-yocto kernel user-patches.scc
+ file (and currently repeated in its SRC_URI) and will be guaranteed
+ to be applied in the order specified.
+"""
+
+
+yocto_kernel_patch_add_help = """
+
+NAME
+ yocto-kernel patch add - Patch the Yocto kernel for a specific BSP
+
+SYNOPSIS
+ yocto-kernel patch add <bsp-name> [<PATCH> ...]
+
+DESCRIPTION
+ This command adds one or more patches to a BSP's machine branch.
+ The patch will be added to the BSP's linux-yocto kernel
+ user-patches.scc file (and currently repeated in its SRC_URI) and
+ will be guaranteed to be applied in the order specified.
+
+ NOTE: It's up to the user to determine whether or not the patches
+ being added make sense - this command does no sanity
+ checking or verification of any kind to ensure that a patch can
+ actually be applied to the BSP's kernel branch; it's assumed that
+ the user has already done that.
+"""
+
+
+yocto_kernel_patch_rm_usage = """
+
+ Remove a patch from the Yocto kernel for a specific BSP
+
+ usage: yocto-kernel patch rm <bsp-name>
+
+ This command removes one or more patches from a BSP's machine branch.
+ The patch will be removed from the BSP's linux-yocto kernel
+ user-patches.scc file (and currently repeated in its SRC_URI) and
+ kernel SRC_URI dir.
+
+ The set of patches available to be removed by this command for a BSP
+ is listed and the user prompted for the specific patches to remove.
+"""
+
+
+yocto_kernel_patch_rm_help = """
+
+NAME
+ yocto-kernel patch rm - Remove a patch from the Yocto kernel for a specific BSP
+
+SYNOPSIS
+ yocto-kernel patch rm <bsp-name>
+
+DESCRIPTION
+ This command removes one or more patches from a BSP's machine
+ branch. The patch will be removed from the BSP's linux-yocto
+ kernel user-patches.scc file (and currently repeated in its
+ SRC_URI).
+
+ The set of patches available to be removed by this command for a
+ BSP is listed and the user prompted for the specific patches to
+ remove.
+"""
+
+yocto_kernel_feature_list_usage = """
+
+ List the BSP features that are being used by a BSP
+
+ usage: yocto-kernel feature list <bsp-name>
+
+ This command lists the features being used by a BSP i.e. the features
+ which are eligible for modification or removal by other yocto-kernel
+ commands.
+
+ 'modifiable' features are the features listed in a BSP's
+ user-features.scc file.
+"""
+
+
+yocto_kernel_feature_list_help = """
+
+NAME
+ yocto-kernel feature list - List the modifiable set of features
+ being used by a BSP
+
+SYNOPSIS
+ yocto-kernel feature list <bsp-name>
+
+DESCRIPTION
+ This command lists the 'modifiable' features being used by a BSP
+ i.e. the features which are eligible for modification or removal
+ by other yocto-kernel commands.
+"""
+
+
+yocto_kernel_feature_add_usage = """
+
+ Add to or modify the list of features being used for a BSP
+
+ usage: yocto-kernel feature add <bsp-name> [/xxxx/yyyy/feature.scc ...]
+
+ This command adds one or more feature items to a BSP's kernel
+ user-features.scc file, which is the file used to manage features in
+ a yocto-bsp-generated BSP. Features to be added must be specified as
+ fully-qualified feature names.
+"""
+
+
+yocto_kernel_feature_add_help = """
+
+NAME
+ yocto-kernel feature add - Add to or modify the list of features
+ being used for a BSP
+
+SYNOPSIS
+ yocto-kernel feature add <bsp-name> [/xxxx/yyyy/feature.scc ...]
+
+DESCRIPTION
+ This command adds one or more feature items to a BSP's
+ user-features.scc file, which is the file used to manage features
+ in a yocto-bsp-generated BSP. Features to be added must be
+ specified as fully-qualified feature names.
+"""
+
+
+yocto_kernel_feature_rm_usage = """
+
+ Remove a feature from the list of features being used for a BSP
+
+ usage: yocto-kernel feature rm <bsp-name>
+
+ This command removes (turns off) one or more features from a BSP's
+ user-features.scc file, which is the file used to manage features in
+ a yocto-bsp-generated BSP.
+
+ The set of features available to be removed by this command for a BSP
+ is listed and the user prompted for the specific items to remove.
+"""
+
+
+yocto_kernel_feature_rm_help = """
+
+NAME
+ yocto-kernel feature rm - Remove a feature from the list of
+ features being used for a BSP
+
+SYNOPSIS
+ yocto-kernel feature rm <bsp-name>
+
+DESCRIPTION
+ This command removes (turns off) one or more features from a BSP's
+ user-features.scc file, which is the file used to manage features
+ in a yocto-bsp-generated BSP.
+
+ The set of features available to be removed by this command for a
+ BSP is listed and the user prompted for the specific items to
+ remove.
+"""
+
+
+yocto_kernel_available_features_list_usage = """
+
+ List the set of kernel features available to a BSP
+
+ usage: yocto-kernel features list <bsp-name>
+
+ This command lists the complete set of kernel features available to a
+ BSP. This includes the features contained in linux-yocto meta
+ branches as well as recipe-space features defined locally to the BSP.
+"""
+
+
+yocto_kernel_available_features_list_help = """
+
+NAME
+ yocto-kernel features list - List the set of kernel features
+ available to a BSP
+
+SYNOPSIS
+ yocto-kernel features list <bsp-name>
+
+DESCRIPTION
+ This command lists the complete set of kernel features available
+ to a BSP. This includes the features contained in linux-yocto
+ meta branches as well as recipe-space features defined locally to
+ the BSP.
+"""
+
+
+yocto_kernel_feature_describe_usage = """
+
+ Print the description and compatibility information for a given kernel feature
+
+ usage: yocto-kernel feature describe <bsp-name> [/xxxx/yyyy/feature.scc ...]
+
+ This command prints the description and compatibility of a specific
+ feature in the format 'description [compatibility]'.
+"""
+
+
+yocto_kernel_feature_describe_help = """
+
+NAME
+ yocto-kernel feature describe - print the description and
+ compatibility information for a given kernel feature
+
+SYNOPSIS
+ yocto-kernel feature describe <bsp-name> [/xxxx/yyyy/feature.scc ...]
+
+DESCRIPTION
+ This command prints the description and compatibility of a
+ specific feature in the format 'description [compatibility]'. If
+ the feature doesn't define a description or compatibility, a
+ string with generic unknown values will be printed.
+"""
+
+
+yocto_kernel_feature_create_usage = """
+
+ Create a recipe-space kernel feature in a BSP
+
+ usage: yocto-kernel feature create <bsp-name> newfeature.scc \
+ "Feature Description" capabilities [<CONFIG_XXX=x> ...] [<PATCH> ...]
+
+ This command creates a new kernel feature from the bare config
+ options and patches specified on the command-line.
+"""
+
+
+yocto_kernel_feature_create_help = """
+
+NAME
+ yocto-kernel feature create - create a recipe-space kernel feature
+ in a BSP
+
+SYNOPSIS
+ yocto-kernel feature create <bsp-name> newfeature.scc \
+ "Feature Description" capabilities [<CONFIG_XXX=x> ...] [<PATCH> ...]
+
+DESCRIPTION
+ This command creates a new kernel feature from the bare config
+ options and patches specified on the command-line. The new
+ feature will be created in recipe-space, specifically in either
+ the kernel .bbappend's /files/cfg or /files/features subdirectory,
+ depending on whether or not the feature contains config items only
+ or config items along with patches. The named feature must end
+ with .scc and must not include a directory component (the
+ containing feature directory is determined automatically). A
+ feature description in double-quotes must also be supplied, along
+ with a capabilities string (which for the time being can be one
+ of: 'all' or 'board').
+"""
+
+
+yocto_kernel_feature_destroy_usage = """
+
+ Destroy a recipe-space kernel feature in a BSP
+
+ usage: yocto-kernel feature destroy <bsp-name> feature.scc
+
+ This command destroys a kernel feature defined in the specified BSP's
+ recipe-space kernel definition.
+"""
+
+
+yocto_kernel_feature_destroy_help = """
+
+NAME
+ yocto-kernel feature destroy <bsp-name> feature.scc - destroy a
+ recipe-space kernel feature in a BSP
+
+SYNOPSIS
+ yocto-kernel feature destroy <bsp-name> feature.scc
+
+DESCRIPTION
+ This command destroys a kernel feature defined in the specified
+ BSP's recipe-space kernel definition. The named feature must end
+ with .scc and must not include a directory component (the
+ containing feature directory is determined automatically). If the
+ kernel feature is in use by a BSP, it can't be removed until the
+ BSP stops using it (see 'yocto-kernel feature rm' to stop using it).
+"""
+
+##
+# yocto-layer help and usage strings
+##
+
+yocto_layer_usage = """
+
+ Create a generic Yocto layer.
+
+ usage: yocto-layer [--version] [--help] COMMAND [ARGS]
+
+ Current 'yocto-layer' commands are:
+ create Create a new generic Yocto layer
+ list List available values for input options and properties
+
+ See 'yocto-layer help COMMAND' for more information on a specific command.
+"""
+
+yocto_layer_help_usage = """
+
+ usage: yocto-layer help <subcommand>
+
+ This command displays detailed help for the specified subcommand.
+"""
+
+yocto_layer_create_usage = """
+
+ Create a new generic Yocto layer
+
+ usage: yocto-layer create <layer-name> [layer_priority]
+ [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+
+ This command creates a generic Yocto layer based on the specified
+ parameters. The new layer will be a new Yocto layer contained by
+ default within the top-level directory specified as
+ 'meta-layer-name'. The -o option can be used to place the layer in a
+ directory with a different name and location.
+
+ If layer_priority is specified, a simple layer will be created using
+ the given layer priority, and the user will not be prompted for
+ further input.
+
+ NOTE: Once created, you should add your new layer to your
+ bblayers.conf file in order for it to be subsequently seen and
+ modified by the yocto-kernel tool. Instructions for doing this can
+ be found in the README file generated in the layer's top-level
+ directory.
+
+ See 'yocto-layer help create' for more detailed instructions.
+"""
+
+yocto_layer_create_help = """
+
+NAME
+ yocto-layer create - Create a new generic Yocto layer
+
+SYNOPSIS
+ yocto-layer create <layer-name> [layer_priority]
+ [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+
+DESCRIPTION
+ This command creates a generic Yocto layer based on the specified
+ parameters. The new layer will be a new Yocto layer contained by
+ default within the top-level directory specified as
+ 'meta-layer-name'. The -o option can be used to place the layer
+ in a directory with a different name and location.
+
+ If layer_priority is specified, a simple layer will be created
+ using the given layer priority, and the user will not be prompted
+ for further input.
+
+ The layer-specific properties that define the values that will be
+ used to generate the layer can be specified on the command-line
+ using the -i option and supplying a JSON object consisting of the
+ set of name:value pairs needed by the layer.
+
+ If the -i option is not used, the user will be interactively
+ prompted for each of the required property values, which will then
+ be used as values for layer generation.
+
+ The set of properties available can be listed using the
+ 'yocto-layer list' command.
+
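+ For example (illustrative values; the property names come from the
+ 'yocto-layer list properties' output), a JSON property file passed
+ via -i might contain:
+
+ { "layer_priority" : "6", "create_example_recipe" : "n",
+ "create_example_bbappend" : "n" }
+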
+ Specifying -c causes the Python code generated and executed to
+ create the layer to be dumped to the 'bspgen.out' file in the
+ current directory, and is useful for debugging.
+
+ NOTE: Once created, you should add your new layer to your
+ bblayers.conf file in order for it to be subsequently seen and
+ modified by the yocto-kernel tool. Instructions for doing this
+ can be found in the README file generated in the layer's top-level
+ directory.
+
+ For example, assuming your poky repo is at /path/to/poky, your new
+ layer is at /path/to/poky/meta-mylayer, and your build directory
+ is /path/to/build:
+
+ $ gedit /path/to/build/conf/bblayers.conf
+
+ BBLAYERS ?= " \\
+ /path/to/poky/meta \\
+ /path/to/poky/meta-yocto \\
+ /path/to/poky/meta-mylayer \\
+ "
+"""
+
+yocto_layer_list_usage = """
+
+ usage: yocto-layer list properties
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+ yocto-layer list property <xxx>
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+
+ This command enumerates the complete set of possible values for a
+ specified option or property needed by the layer creation process.
+
+ The first form enumerates all the possible properties that exist and
+ must have values specified for them in the 'yocto-layer create'
+ command.
+
+ The second form enumerates all the possible values that exist and can
+ be specified for any of the enumerable properties in the 'yocto-layer
+ create' command.
+
+ See 'yocto-layer help list' for more details.
+"""
+
+yocto_layer_list_help = """
+
+NAME
+ yocto-layer list - List available values for layer input options and properties
+
+SYNOPSIS
+ yocto-layer list properties
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+ yocto-layer list property <xxx>
+ [-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
+
+DESCRIPTION
+ This command enumerates the complete set of possible values for a
+ specified option or property needed by the layer creation process.
+
+ The first form enumerates all the possible properties that exist
+ and must have values specified for them in the 'yocto-layer
+ create' command. This command is mainly meant to aid the
+ development of user interface alternatives to the default
+ text-based prompting interface. If the -o option is specified,
+ the list of properties, in addition to being displayed, will be
+ written to the specified file as a JSON object. In this case, the
+ object will consist of the set of name:value pairs corresponding
+ to the (possibly nested) dictionary of properties defined by the
+ input statements used by the layer. Some example output for the
+ 'list properties' command:
+
+ $ yocto-layer list properties
+ "example_bbappend_name" : {
+ "default" : example
+ "msg" : Please enter the name you'd like to use for your bbappend file:
+ "type" : edit
+ "prio" : 20
+ "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
+ }
+ "create_example_recipe" : {
+ "default" : n
+ "msg" : Would you like to have an example recipe created? (y/n)
+ "type" : boolean
+ "prio" : 20
+ "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
+ }
+ "example_recipe_name" : {
+ "default" : example
+ "msg" : Please enter the name you'd like to use for your example recipe:
+ "type" : edit
+ "prio" : 20
+ "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
+ }
+ "layer_priority" : {
+ "default" : 6
+ "msg" : Please enter the layer priority you'd like to use for the layer:
+ "type" : edit
+ "prio" : 20
+ "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
+ }
+ "create_example_bbappend" : {
+ "default" : n
+ "msg" : Would you like to have an example bbappend file created? (y/n)
+ "type" : boolean
+ "prio" : 20
+ "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
+ }
+ "example_bbappend_version" : {
+ "default" : 0.1
+ "msg" : Please enter the version number you'd like to use for your bbappend file (this should match the recipe you're appending to):
+ "type" : edit
+ "prio" : 20
+ "filename" : /home/trz/yocto/yocto-layer-dev/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
+ }
+
+ Each entry in the output consists of the name of the input element
+ e.g. "layer_priority", followed by the properties defined for that
+ element enclosed in braces. This information should be sufficient
+ to create a complete user interface. Two
+ features of the scheme provide for conditional input. First, if a
+ Python "if" statement appears in place of an input element name,
+ the set of enclosed input elements apply and should be presented
+ to the user only if the 'if' statement evaluates to true. The
+ test in the if statement will always reference another input
+ element in the list, which means that the element being tested
+ should be presented to the user before the elements enclosed by
+ the if block. Secondly, in a similar way, some elements contain
+ "depends-on" and depends-on-val" tags, which mean that the
+ affected input element should only be presented to the user if the
+ element it depends on has already been presented to the user and
+ the user has selected the specified value for that element.
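+
+    As an illustration (using a hypothetical element, not part of the
+    actual layer input), an element gated on another element's value
+    might look like:
+
+        "example_recipe_version" : {
+            "default" : 0.1
+            "msg" : Please enter the version to use for your example recipe:
+            "type" : edit
+            "prio" : 20
+            "depends-on" : create_example_recipe
+            "depends-on-val" : y
+        }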
+
+ The second form enumerates all the possible values that exist and
+ can be specified for any of the enumerable properties in the
+ 'yocto-layer create' command. If the -o option is specified, the
+ list of values for the given property, in addition to being
+ displayed, will be written to the specified file as a JSON object.
+ In this case, the object will consist of the set of name:value
+ pairs corresponding to the array of property values associated
+ with the property.
+
+ $ yocto-layer list property layer_priority
+ [no output - layer_priority is a text field that has no enumerable values]
+
+    The second form is likewise meant mainly for developers of
+    alternative interfaces - it allows the developer to fetch the
+    possible values for a given input element on demand.  This
+    on-demand capability is especially valuable for elements whose
+    values require relatively expensive remote operations to gather,
+    such as an element whose choices are the set of branches available
+    in a remote git tree.
+
+"""
+
+##
+# test code
+##
+
+test_bsp_properties = {
+ 'smp': 'yes',
+ 'touchscreen': 'yes',
+ 'keyboard': 'no',
+ 'xserver': 'yes',
+ 'xserver_choice': 'xserver-i915',
+ 'features': ['goodfeature', 'greatfeature'],
+ 'tunefile': 'tune-quark',
+}
+
diff --git a/scripts/lib/bsp/kernel.py b/scripts/lib/bsp/kernel.py
new file mode 100644
index 0000000..0783228
--- /dev/null
+++ b/scripts/lib/bsp/kernel.py
@@ -0,0 +1,1072 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2012, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module implements the kernel-related functions used by
+# 'yocto-kernel' to manage kernel config items and patches for Yocto
+# BSPs.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] intel.com>
+#
+
+import sys
+import os
+import shutil
+from tags import *
+import glob
+import subprocess
+from engine import create_context
+
+
+def find_bblayers():
+ """
+ Find and return a sanitized list of the layers found in BBLAYERS.
+ """
+ try:
+ builddir = os.environ["BUILDDIR"]
+ except KeyError:
+ print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
+ sys.exit(1)
+ bblayers_conf = os.path.join(builddir, "conf/bblayers.conf")
+
+ layers = []
+
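+    # Parse the output of 'bitbake -e' (rather than reading bblayers.conf
+    # directly) so that any variables used in BBLAYERS are already expanded.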
+ bitbake_env_cmd = "bitbake -e"
+ bitbake_env_lines = subprocess.Popen(bitbake_env_cmd, shell=True,
+ stdout=subprocess.PIPE).stdout.read()
+
+ if not bitbake_env_lines:
+ print "Couldn't get '%s' output, exiting." % bitbake_env_cmd
+ sys.exit(1)
+
+ for line in bitbake_env_lines.split('\n'):
+ bblayers = get_line_val(line, "BBLAYERS")
+ if (bblayers):
+ break
+
+ if not bblayers:
+ print "Couldn't find BBLAYERS in %s output, exiting." % \
+ bitbake_env_cmd
+ sys.exit(1)
+
+ raw_layers = bblayers.split()
+
+ for layer in raw_layers:
+ if layer == 'BBLAYERS' or '=' in layer:
+ continue
+ layers.append(layer)
+
+ return layers
+
+
+def get_line_val(line, key):
+ """
+ Extract the value from the VAR="val" string
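+
+    For example, get_line_val('BBLAYERS="/path/meta /path/meta-mine"', "BBLAYERS")
+    returns '/path/meta /path/meta-mine'.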
+ """
+ if line.startswith(key + "="):
+ stripped_line = line.split('=')[1]
+ stripped_line = stripped_line.replace('\"', '')
+ return stripped_line
+ return None
+
+
+def find_meta_layer():
+ """
+ Find and return the meta layer in BBLAYERS.
+ """
+ layers = find_bblayers()
+
+ for layer in layers:
+ if layer.endswith("meta"):
+ return layer
+
+ return None
+
+
+def find_bsp_layer(machine):
+ """
+ Find and return a machine's BSP layer in BBLAYERS.
+ """
+ layers = find_bblayers()
+
+ for layer in layers:
+ if layer.endswith(machine):
+ return layer
+
+ print "Unable to find the BSP layer for machine %s." % machine
+ print "Please make sure it is listed in bblayers.conf"
+ sys.exit(1)
+
+
+def gen_choices_str(choices):
+ """
+ Generate a numbered list of choices from a list of choices for
+ display to the user.
+ """
+ choices_str = ""
+
+ for i, choice in enumerate(choices):
+ choices_str += "\t" + str(i + 1) + ") " + choice + "\n"
+
+ return choices_str
+
+
+def open_user_file(scripts_path, machine, userfile, mode):
+ """
+ Find one of the user files (user-config.cfg, user-patches.scc)
+ associated with the machine (could be in files/,
+ linux-yocto-custom/, etc). Returns the open file if found, None
+ otherwise.
+
+ The caller is responsible for closing the file returned.
+ """
+ layer = find_bsp_layer(machine)
+ linuxdir = os.path.join(layer, "recipes-kernel/linux")
+ linuxdir_list = os.listdir(linuxdir)
+ for fileobj in linuxdir_list:
+ fileobj_path = os.path.join(linuxdir, fileobj)
+ if os.path.isdir(fileobj_path):
+ userfile_name = os.path.join(fileobj_path, userfile)
+ try:
+ f = open(userfile_name, mode)
+ return f
+ except IOError:
+ continue
+ return None
+
+
+def read_config_items(scripts_path, machine):
+ """
+ Find and return a list of config items (CONFIG_XXX) in a machine's
+ user-defined config fragment [${machine}-user-config.cfg].
+ """
+ config_items = []
+
+ f = open_user_file(scripts_path, machine, machine+"-user-config.cfg", "r")
+ lines = f.readlines()
+ for line in lines:
+ s = line.strip()
+ if s and not s.startswith("#"):
+ config_items.append(s)
+ f.close()
+
+ return config_items
+
+
+def write_config_items(scripts_path, machine, config_items):
+ """
+ Write (replace) the list of config items (CONFIG_XXX) in a
+    machine's user-defined config fragment [${machine}-user-config.cfg].
+ """
+ f = open_user_file(scripts_path, machine, machine+"-user-config.cfg", "w")
+ for item in config_items:
+ f.write(item + "\n")
+ f.close()
+
+ kernel_contents_changed(scripts_path, machine)
+
+
+def yocto_kernel_config_list(scripts_path, machine):
+ """
+ Display the list of config items (CONFIG_XXX) in a machine's
+ user-defined config fragment [${machine}-user-config.cfg].
+ """
+ config_items = read_config_items(scripts_path, machine)
+
+ print "The current set of machine-specific kernel config items for %s is:" % machine
+ print gen_choices_str(config_items)
+
+
+def yocto_kernel_config_rm(scripts_path, machine):
+ """
+ Display the list of config items (CONFIG_XXX) in a machine's
+ user-defined config fragment [${machine}-user-config.cfg], prompt the user
+ for one or more to remove, and remove them.
+ """
+ config_items = read_config_items(scripts_path, machine)
+
+ print "Specify the kernel config items to remove:"
+ input = raw_input(gen_choices_str(config_items))
+ rm_choices = input.split()
+ rm_choices.sort()
+
+ removed = []
+
+ for choice in reversed(rm_choices):
+ try:
+ idx = int(choice) - 1
+ except ValueError:
+ print "Invalid choice (%s), exiting" % choice
+ sys.exit(1)
+ if idx < 0 or idx >= len(config_items):
+ print "Invalid choice (%d), exiting" % (idx + 1)
+ sys.exit(1)
+ removed.append(config_items.pop(idx))
+
+ write_config_items(scripts_path, machine, config_items)
+
+ print "Removed items:"
+ for r in removed:
+ print "\t%s" % r
+
+
+def yocto_kernel_config_add(scripts_path, machine, config_items):
+ """
+ Add one or more config items (CONFIG_XXX) to a machine's
+ user-defined config fragment [${machine}-user-config.cfg].
+ """
+ new_items = []
+ dup_items = []
+
+ cur_items = read_config_items(scripts_path, machine)
+
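+    # only accept config items of the form CONFIG_XXX=y or CONFIG_XXX=m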
+ for item in config_items:
+ if not item.startswith("CONFIG") or (not "=y" in item and not "=m" in item):
+ print "Invalid config item (%s), exiting" % item
+ sys.exit(1)
+ if item not in cur_items and item not in new_items:
+ new_items.append(item)
+ else:
+ dup_items.append(item)
+
+ if len(new_items) > 0:
+ cur_items.extend(new_items)
+ write_config_items(scripts_path, machine, cur_items)
+ print "Added item%s:" % ("" if len(new_items)==1 else "s")
+ for n in new_items:
+ print "\t%s" % n
+
+ if len(dup_items) > 0:
+ output="The following item%s already exist%s in the current configuration, ignoring %s:" % \
+ (("","s", "it") if len(dup_items)==1 else ("s", "", "them" ))
+ print output
+ for n in dup_items:
+ print "\t%s" % n
+
+def find_current_kernel(bsp_layer, machine):
+ """
+ Determine the kernel and version currently being used in the BSP.
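+
+    Returns, for example, "linux-yocto_4.4" for a machine .conf that
+    sets PREFERRED_PROVIDER_virtual/kernel to "linux-yocto" and
+    PREFERRED_VERSION_linux-yocto to "4.4%".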
+ """
+ machine_conf = os.path.join(bsp_layer, "conf/machine/" + machine + ".conf")
+
+ preferred_kernel = preferred_kernel_version = preferred_version_varname = None
+
+ f = open(machine_conf, "r")
+ lines = f.readlines()
+ for line in lines:
+ if line.strip().startswith("PREFERRED_PROVIDER_virtual/kernel"):
+ preferred_kernel = line.split()[-1]
+ preferred_kernel = preferred_kernel.replace('\"','')
+ preferred_version_varname = "PREFERRED_VERSION_" + preferred_kernel
+ if preferred_version_varname and line.strip().startswith(preferred_version_varname):
+ preferred_kernel_version = line.split()[-1]
+ preferred_kernel_version = preferred_kernel_version.replace('\"','')
+ preferred_kernel_version = preferred_kernel_version.replace('%','')
+
+ if preferred_kernel and preferred_kernel_version:
+ return preferred_kernel + "_" + preferred_kernel_version
+ elif preferred_kernel:
+ return preferred_kernel
+
+
+def find_filesdir(scripts_path, machine):
+ """
+ Find the name of the 'files' dir associated with the machine
+ (could be in files/, linux-yocto-custom/, etc). Returns the name
+ of the files dir if found, None otherwise.
+ """
+ layer = find_bsp_layer(machine)
+ filesdir = None
+ linuxdir = os.path.join(layer, "recipes-kernel/linux")
+ linuxdir_list = os.listdir(linuxdir)
+ for fileobj in linuxdir_list:
+ fileobj_path = os.path.join(linuxdir, fileobj)
+ if os.path.isdir(fileobj_path):
+            # this could be files/ or linux-yocto-custom/, we have no way of distinguishing
+            # so we use the last (and normally only) dir we find as the 'filesdir'
+ filesdir = fileobj_path
+
+ return filesdir
+
+
+def read_patch_items(scripts_path, machine):
+ """
+ Find and return a list of patch items in a machine's user-defined
+ patch list [${machine}-user-patches.scc].
+ """
+ patch_items = []
+
+ f = open_user_file(scripts_path, machine, machine+"-user-patches.scc", "r")
+ lines = f.readlines()
+ for line in lines:
+ s = line.strip()
+ if s and not s.startswith("#"):
+ fields = s.split()
+ if not fields[0] == "patch":
+ continue
+ patch_items.append(fields[1])
+ f.close()
+
+ return patch_items
+
+
+def write_patch_items(scripts_path, machine, patch_items):
+ """
+ Write (replace) the list of patches in a machine's user-defined
+ patch list [${machine}-user-patches.scc].
+ """
+ f = open_user_file(scripts_path, machine, machine+"-user-patches.scc", "w")
+ f.write("mark patching start\n")
+ for item in patch_items:
+ f.write("patch " + item + "\n")
+ f.close()
+
+ kernel_contents_changed(scripts_path, machine)
+
+
+def yocto_kernel_patch_list(scripts_path, machine):
+ """
+ Display the list of patches in a machine's user-defined patch list
+ [${machine}-user-patches.scc].
+ """
+ patches = read_patch_items(scripts_path, machine)
+
+ print "The current set of machine-specific patches for %s is:" % machine
+ print gen_choices_str(patches)
+
+
+def yocto_kernel_patch_rm(scripts_path, machine):
+ """
+ Remove one or more patches from a machine's user-defined patch
+ list [${machine}-user-patches.scc].
+ """
+ patches = read_patch_items(scripts_path, machine)
+
+ print "Specify the patches to remove:"
+ input = raw_input(gen_choices_str(patches))
+ rm_choices = input.split()
+ rm_choices.sort()
+
+ removed = []
+
+ filesdir = find_filesdir(scripts_path, machine)
+ if not filesdir:
+ print "Couldn't rm patch(es) since we couldn't find a 'files' dir"
+ sys.exit(1)
+
+ for choice in reversed(rm_choices):
+ try:
+ idx = int(choice) - 1
+ except ValueError:
+ print "Invalid choice (%s), exiting" % choice
+ sys.exit(1)
+ if idx < 0 or idx >= len(patches):
+ print "Invalid choice (%d), exiting" % (idx + 1)
+ sys.exit(1)
+ filesdir_patch = os.path.join(filesdir, patches[idx])
+ if os.path.isfile(filesdir_patch):
+ os.remove(filesdir_patch)
+ removed.append(patches[idx])
+ patches.pop(idx)
+
+ write_patch_items(scripts_path, machine, patches)
+
+ print "Removed patches:"
+ for r in removed:
+ print "\t%s" % r
+
+
+def yocto_kernel_patch_add(scripts_path, machine, patches):
+ """
+ Add one or more patches to a machine's user-defined patch list
+ [${machine}-user-patches.scc].
+ """
+ existing_patches = read_patch_items(scripts_path, machine)
+
+ for patch in patches:
+ if os.path.basename(patch) in existing_patches:
+ print "Couldn't add patch (%s) since it's already been added" % os.path.basename(patch)
+ sys.exit(1)
+
+ filesdir = find_filesdir(scripts_path, machine)
+ if not filesdir:
+ print "Couldn't add patch (%s) since we couldn't find a 'files' dir to add it to" % os.path.basename(patch)
+ sys.exit(1)
+
+ new_patches = []
+
+ for patch in patches:
+ if not os.path.isfile(patch):
+ print "Couldn't find patch (%s), exiting" % patch
+ sys.exit(1)
+ basename = os.path.basename(patch)
+ filesdir_patch = os.path.join(filesdir, basename)
+ shutil.copyfile(patch, filesdir_patch)
+ new_patches.append(basename)
+
+ cur_items = read_patch_items(scripts_path, machine)
+ cur_items.extend(new_patches)
+ write_patch_items(scripts_path, machine, cur_items)
+
+ print "Added patches:"
+ for n in new_patches:
+ print "\t%s" % n
+
+
+def inc_pr(line):
+ """
+    Add 1 to the PR value in the given bbappend PR line.  Used to bump
+    the PR lines in kernel .bbappends after modifications.  Handles PRs
+    of the form PR := "${PR}.1" as well as PR = "r0".
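+
+    For example, 'PR := "${PR}.1"' becomes 'PR := "${PR}.2"', and
+    'PR = "r0"' becomes 'PR = "r1"'.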
+ """
+ idx = line.find("\"")
+
+ pr_str = line[idx:]
+ pr_str = pr_str.replace('\"','')
+ fields = pr_str.split('.')
+ if len(fields) > 1:
+ fields[1] = str(int(fields[1]) + 1)
+ pr_str = "\"" + '.'.join(fields) + "\"\n"
+ else:
+ pr_val = pr_str[1:]
+ pr_str = "\"" + "r" + str(int(pr_val) + 1) + "\"\n"
+ line = line[:idx] + pr_str
+
+ return line
+
+
+def kernel_contents_changed(scripts_path, machine):
+ """
+    Notify the build system that the kernel recipe's contents have
+    changed, by bumping the PR value in the kernel .bbappend (or .bb).
+ """
+ layer = find_bsp_layer(machine)
+
+ kernel = find_current_kernel(layer, machine)
+ if not kernel:
+ print "Couldn't determine the kernel for this BSP, exiting."
+ sys.exit(1)
+
+ kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bbappend")
+ if not os.path.isfile(kernel_bbfile):
+ kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bb")
+ if not os.path.isfile(kernel_bbfile):
+ return
+ kernel_bbfile_prev = kernel_bbfile + ".prev"
+ shutil.copyfile(kernel_bbfile, kernel_bbfile_prev)
+
+ ifile = open(kernel_bbfile_prev, "r")
+ ofile = open(kernel_bbfile, "w")
+ ifile_lines = ifile.readlines()
+ for ifile_line in ifile_lines:
+ if ifile_line.strip().startswith("PR"):
+ ifile_line = inc_pr(ifile_line)
+ ofile.write(ifile_line)
+ ofile.close()
+ ifile.close()
+
+
+def kernels(context):
+ """
+    Return the list of available kernels i.e. those corresponding to
+    the kernel .bbappends found in the substrate for the given arch.
+ """
+ archdir = os.path.join(context["scripts_path"], "lib/bsp/substrate/target/arch/" + context["arch"])
+ kerndir = os.path.join(archdir, "recipes-kernel/linux")
+ bbglob = os.path.join(kerndir, "*.bbappend")
+
+ bbappends = glob.glob(bbglob)
+
+ kernels = []
+
+ for kernel in bbappends:
+ filename = os.path.splitext(os.path.basename(kernel))[0]
+ idx = filename.find(CLOSE_TAG)
+ if idx != -1:
+ filename = filename[idx + len(CLOSE_TAG):].strip()
+ kernels.append(filename)
+
+ kernels.append("custom")
+
+ return kernels
+
+
+def extract_giturl(file):
+ """
+ Extract the git url of the kernel repo from the kernel recipe's
+ SRC_URI.
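+
+    For example, a SRC_URI beginning with
+    "git://git.example.com/linux-yocto.git;protocol=http;..." would
+    yield "http://git.example.com/linux-yocto.git".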
+ """
+ url = None
+ f = open(file, "r")
+ lines = f.readlines()
+ for line in lines:
+ line = line.strip()
+ if line.startswith("SRC_URI"):
+ line = line[len("SRC_URI"):].strip()
+ if line.startswith("="):
+ line = line[1:].strip()
+ if line.startswith("\""):
+ line = line[1:].strip()
+ prot = "git"
+ for s in line.split(";"):
+ if s.startswith("git://"):
+ url = s
+ if s.startswith("protocol="):
+ prot = s.split("=")[1]
+ if url:
+ url = prot + url[3:]
+ return url
+
+
+def find_giturl(context):
+ """
+ Find the git url of the kernel repo from the kernel recipe's
+ SRC_URI.
+ """
+ name = context["name"]
+ filebase = context["filename"]
+ scripts_path = context["scripts_path"]
+
+ meta_layer = find_meta_layer()
+
+ kerndir = os.path.join(meta_layer, "recipes-kernel/linux")
+ bbglob = os.path.join(kerndir, "*.bb")
+ bbs = glob.glob(bbglob)
+ for kernel in bbs:
+ filename = os.path.splitext(os.path.basename(kernel))[0]
+ if filename in filebase:
+ giturl = extract_giturl(kernel)
+ return giturl
+
+ return None
+
+
+def read_features(scripts_path, machine):
+ """
+ Find and return a list of features in a machine's user-defined
+ features fragment [${machine}-user-features.scc].
+ """
+ features = []
+
+ f = open_user_file(scripts_path, machine, machine+"-user-features.scc", "r")
+ lines = f.readlines()
+ for line in lines:
+ s = line.strip()
+ if s and not s.startswith("#"):
+ feature_include = s.split()
+ features.append(feature_include[1].strip())
+ f.close()
+
+ return features
+
+
+def write_features(scripts_path, machine, features):
+ """
+ Write (replace) the list of feature items in a
+    machine's user-defined features fragment [${machine}-user-features.scc].
+ """
+ f = open_user_file(scripts_path, machine, machine+"-user-features.scc", "w")
+ for item in features:
+ f.write("include " + item + "\n")
+ f.close()
+
+ kernel_contents_changed(scripts_path, machine)
+
+
+def yocto_kernel_feature_list(scripts_path, machine):
+ """
+ Display the list of features used in a machine's user-defined
+ features fragment [${machine}-user-features.scc].
+ """
+ features = read_features(scripts_path, machine)
+
+ print "The current set of machine-specific features for %s is:" % machine
+ print gen_choices_str(features)
+
+
+def yocto_kernel_feature_rm(scripts_path, machine):
+ """
+ Display the list of features used in a machine's user-defined
+ features fragment [${machine}-user-features.scc], prompt the user
+ for one or more to remove, and remove them.
+ """
+ features = read_features(scripts_path, machine)
+
+ print "Specify the features to remove:"
+ input = raw_input(gen_choices_str(features))
+ rm_choices = input.split()
+ rm_choices.sort()
+
+ removed = []
+
+ for choice in reversed(rm_choices):
+ try:
+ idx = int(choice) - 1
+ except ValueError:
+ print "Invalid choice (%s), exiting" % choice
+ sys.exit(1)
+ if idx < 0 or idx >= len(features):
+ print "Invalid choice (%d), exiting" % (idx + 1)
+ sys.exit(1)
+ removed.append(features.pop(idx))
+
+ write_features(scripts_path, machine, features)
+
+ print "Removed features:"
+ for r in removed:
+ print "\t%s" % r
+
+
+def yocto_kernel_feature_add(scripts_path, machine, features):
+ """
+    Add one or more features to a machine's user-defined features
+ fragment [${machine}-user-features.scc].
+ """
+ new_items = []
+
+ for item in features:
+ if not item.endswith(".scc"):
+ print "Invalid feature (%s), exiting" % item
+ sys.exit(1)
+ new_items.append(item)
+
+ cur_items = read_features(scripts_path, machine)
+ cur_items.extend(new_items)
+
+ write_features(scripts_path, machine, cur_items)
+
+ print "Added features:"
+ for n in new_items:
+ print "\t%s" % n
+
+
+def find_feature_url(git_url):
+ """
+    Find the url of the kern-features.rc file for the kernel repo
+    specified in the BSP's kernel recipe SRC_URI.
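+
+    For example, "git://git.example.com/linux-yocto.git" would map to
+    "http://git.example.com/cgit/cgit.cgi/linux-yocto/plain/meta/cfg/kern-features.rc?h=meta".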
+ """
+ feature_url = ""
+ if git_url.startswith("git://"):
+ git_url = git_url[len("git://"):].strip()
+ s = git_url.split("/")
+ if s[1].endswith(".git"):
+ s[1] = s[1][:len(s[1]) - len(".git")]
+ feature_url = "http://" + s[0] + "/cgit/cgit.cgi/" + s[1] + \
+ "/plain/meta/cfg/kern-features.rc?h=meta"
+
+ return feature_url
+
+
+def find_feature_desc(lines):
+ """
+ Find the feature description and compatibility in the passed-in
+    set of lines.  Returns a string of the form 'desc
+ [compat]'.
+ """
+ desc = "no description available"
+ compat = "unknown"
+
+ for line in lines:
+ idx = line.find("KFEATURE_DESCRIPTION")
+ if idx != -1:
+ desc = line[idx + len("KFEATURE_DESCRIPTION"):].strip()
+ if desc.startswith("\""):
+ desc = desc[1:]
+ if desc.endswith("\""):
+ desc = desc[:-1]
+ else:
+ idx = line.find("KFEATURE_COMPATIBILITY")
+ if idx != -1:
+ compat = line[idx + len("KFEATURE_COMPATIBILITY"):].strip()
+
+ return desc + " [" + compat + "]"
+
+
+def print_feature_descs(layer, feature_dir):
+ """
+ Print the feature descriptions for the features in feature_dir.
+ """
+ kernel_files_features = os.path.join(layer, "recipes-kernel/linux/files/" +
+ feature_dir)
+ for root, dirs, files in os.walk(kernel_files_features):
+ for file in files:
+ if file.endswith("~") or file.endswith("#"):
+ continue
+ if file.endswith(".scc"):
+ fullpath = os.path.join(layer, "recipes-kernel/linux/files/" +
+ feature_dir + "/" + file)
+ f = open(fullpath)
+ feature_desc = find_feature_desc(f.readlines())
+ print feature_dir + "/" + file + ": " + feature_desc
+
+
+def yocto_kernel_available_features_list(scripts_path, machine):
+ """
+ Display the list of all the kernel features available for use in
+ BSPs, as gathered from the set of feature sources.
+ """
+ layer = find_bsp_layer(machine)
+ kernel = find_current_kernel(layer, machine)
+ if not kernel:
+ print "Couldn't determine the kernel for this BSP, exiting."
+ sys.exit(1)
+
+ context = create_context(machine, "arch", scripts_path)
+ context["name"] = "name"
+ context["filename"] = kernel
+ giturl = find_giturl(context)
+ feature_url = find_feature_url(giturl)
+
+ feature_cmd = "wget -q -O - " + feature_url
+ tmp = subprocess.Popen(feature_cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+
+ print "The current set of kernel features available to %s is:\n" % machine
+
+ if tmp:
+ tmpline = tmp.split("\n")
+ in_kernel_options = False
+ for line in tmpline:
+ if not "=" in line:
+ if in_kernel_options:
+ break
+ if "kernel-options" in line:
+ in_kernel_options = True
+ continue
+ if in_kernel_options:
+ feature_def = line.split("=")
+ feature_type = feature_def[0].strip()
+ feature = feature_def[1].strip()
+ desc = get_feature_desc(giturl, feature)
+ print "%s: %s" % (feature, desc)
+
+ print "[local]"
+
+ print_feature_descs(layer, "cfg")
+ print_feature_descs(layer, "features")
+
+
+def find_feature_desc_url(git_url, feature):
+ """
+ Find the url of the kernel feature in the kernel repo specified
+    in the BSP's kernel recipe SRC_URI.
+ """
+ feature_desc_url = ""
+ if git_url.startswith("git://"):
+ git_url = git_url[len("git://"):].strip()
+ s = git_url.split("/")
+ if s[1].endswith(".git"):
+ s[1] = s[1][:len(s[1]) - len(".git")]
+ feature_desc_url = "http://" + s[0] + "/cgit/cgit.cgi/" + s[1] + \
+ "/plain/meta/cfg/kernel-cache/" + feature + "?h=meta"
+
+ return feature_desc_url
+
+
+def get_feature_desc(git_url, feature):
+ """
+    Return a feature description of the form 'description [compatibility]'
+    for the given feature, as gathered from the feature's definition in
+    the kernel repo.
+ """
+ feature_desc_url = find_feature_desc_url(git_url, feature)
+ feature_desc_cmd = "wget -q -O - " + feature_desc_url
+ tmp = subprocess.Popen(feature_desc_cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+
+ return find_feature_desc(tmp.split("\n"))
+
+
+def yocto_kernel_feature_describe(scripts_path, machine, feature):
+ """
+ Display the description of a specific kernel feature available for
+ use in a BSP.
+ """
+ layer = find_bsp_layer(machine)
+
+ kernel = find_current_kernel(layer, machine)
+ if not kernel:
+ print "Couldn't determine the kernel for this BSP, exiting."
+ sys.exit(1)
+
+ context = create_context(machine, "arch", scripts_path)
+ context["name"] = "name"
+ context["filename"] = kernel
+ giturl = find_giturl(context)
+
+ desc = get_feature_desc(giturl, feature)
+
+ print desc
+
+
+def check_feature_name(feature_name):
+ """
+ Sanity-check the feature name for create/destroy. Return False if not OK.
+ """
+ if not feature_name.endswith(".scc"):
+ print "Invalid feature name (must end with .scc) [%s], exiting" % feature_name
+ return False
+
+ if "/" in feature_name:
+ print "Invalid feature name (don't specify directory) [%s], exiting" % feature_name
+ return False
+
+ return True
+
+
+def check_create_input(feature_items):
+ """
+ Sanity-check the create input. Return False if not OK.
+ """
+ if not check_feature_name(feature_items[0]):
+ return False
+
+ if feature_items[1].endswith(".patch") or feature_items[1].startswith("CONFIG_"):
+        print "Missing description and/or compatibility [%s], exiting" % feature_items[1]
+ return False
+
+ if feature_items[2].endswith(".patch") or feature_items[2].startswith("CONFIG_"):
+        print "Missing description and/or compatibility [%s], exiting" % feature_items[2]
+ return False
+
+ return True
+
+
+def yocto_kernel_feature_create(scripts_path, machine, feature_items):
+ """
+ Create a recipe-space kernel feature in a BSP.
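+
+    feature_items is expected to be of the form:
+      [<feature>.scc, <description>, <compatibility>, <.patch and/or CONFIG_* items>...]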
+ """
+ if not check_create_input(feature_items):
+ sys.exit(1)
+
+ feature = feature_items[0]
+ feature_basename = feature.split(".")[0]
+ feature_description = feature_items[1]
+ feature_compat = feature_items[2]
+
+ patches = []
+ cfg_items = []
+
+ for item in feature_items[3:]:
+ if item.endswith(".patch"):
+ patches.append(item)
+ elif item.startswith("CONFIG"):
+ if ("=y" in item or "=m" in item):
+ cfg_items.append(item)
+ else:
+ print "Invalid feature item (must be .patch or CONFIG_*) [%s], exiting" % item
+ sys.exit(1)
+
+ feature_dirname = "cfg"
+ if patches:
+ feature_dirname = "features"
+
+ filesdir = find_filesdir(scripts_path, machine)
+ if not filesdir:
+ print "Couldn't add feature (%s), no 'files' dir found" % feature
+ sys.exit(1)
+
+ featdir = os.path.join(filesdir, feature_dirname)
+ if not os.path.exists(featdir):
+ os.mkdir(featdir)
+
+ for patch in patches:
+ if not os.path.isfile(patch):
+ print "Couldn't find patch (%s), exiting" % patch
+ sys.exit(1)
+ basename = os.path.basename(patch)
+ featdir_patch = os.path.join(featdir, basename)
+ shutil.copyfile(patch, featdir_patch)
+
+ new_cfg_filename = os.path.join(featdir, feature_basename + ".cfg")
+ new_cfg_file = open(new_cfg_filename, "w")
+ for cfg_item in cfg_items:
+ new_cfg_file.write(cfg_item + "\n")
+ new_cfg_file.close()
+
+ new_feature_filename = os.path.join(featdir, feature_basename + ".scc")
+ new_feature_file = open(new_feature_filename, "w")
+ new_feature_file.write("define KFEATURE_DESCRIPTION \"" + feature_description + "\"\n")
+ new_feature_file.write("define KFEATURE_COMPATIBILITY " + feature_compat + "\n\n")
+
+ for patch in patches:
+ patch_dir, patch_file = os.path.split(patch)
+ new_feature_file.write("patch " + patch_file + "\n")
+
+ new_feature_file.write("kconf non-hardware " + feature_basename + ".cfg\n")
+ new_feature_file.close()
+
+ print "Added feature:"
+ print "\t%s" % feature_dirname + "/" + feature
+
+
+def feature_in_use(scripts_path, machine, feature):
+ """
+ Determine whether the specified feature is in use by the BSP.
+ Return True if so, False otherwise.
+ """
+ features = read_features(scripts_path, machine)
+ for f in features:
+ if f == feature:
+ return True
+ return False
+
+
+def feature_remove(scripts_path, machine, feature):
+ """
+ Remove the specified feature from the available recipe-space
+ features defined for the BSP.
+ """
+ features = read_features(scripts_path, machine)
+ new_features = []
+ for f in features:
+ if f == feature:
+ continue
+ new_features.append(f)
+ write_features(scripts_path, machine, new_features)
+
+
+def yocto_kernel_feature_destroy(scripts_path, machine, feature):
+ """
+ Remove a recipe-space kernel feature from a BSP.
+ """
+ if not check_feature_name(feature):
+ sys.exit(1)
+
+ if feature_in_use(scripts_path, machine, "features/" + feature) or \
+ feature_in_use(scripts_path, machine, "cfg/" + feature):
+ print "Feature %s is in use (use 'feature rm' to un-use it first), exiting" % feature
+ sys.exit(1)
+
+ filesdir = find_filesdir(scripts_path, machine)
+ if not filesdir:
+ print "Couldn't destroy feature (%s), no 'files' dir found" % feature
+ sys.exit(1)
+
+ feature_dirname = "features"
+ featdir = os.path.join(filesdir, feature_dirname)
+ if not os.path.exists(featdir):
+ print "Couldn't find feature directory (%s)" % feature_dirname
+ sys.exit(1)
+
+ feature_fqn = os.path.join(featdir, feature)
+ if not os.path.exists(feature_fqn):
+ feature_dirname = "cfg"
+ featdir = os.path.join(filesdir, feature_dirname)
+ if not os.path.exists(featdir):
+ print "Couldn't find feature directory (%s)" % feature_dirname
+ sys.exit(1)
+        feature_fqn = os.path.join(featdir, feature)
+ if not os.path.exists(feature_fqn):
+ print "Couldn't find feature (%s)" % feature
+ sys.exit(1)
+
+ f = open(feature_fqn, "r")
+ lines = f.readlines()
+ for line in lines:
+ s = line.strip()
+ if s.startswith("patch ") or s.startswith("kconf "):
+ split_line = s.split()
+ filename = os.path.join(featdir, split_line[-1])
+ if os.path.exists(filename):
+ os.remove(filename)
+ f.close()
+ os.remove(feature_fqn)
+
+ feature_remove(scripts_path, machine, feature)
+
+ print "Removed feature:"
+ print "\t%s" % feature_dirname + "/" + feature
+
+
+def base_branches(context):
+ """
+ Return a list of the base branches found in the kernel git repo.
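+
+    For example, a remote branch "refs/heads/standard/beagleboard/base"
+    would be returned as "standard/beagleboard".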
+ """
+ giturl = find_giturl(context)
+
+ print "Getting branches from remote repo %s..." % giturl
+
+ gitcmd = "git ls-remote %s *heads* 2>&1" % (giturl)
+ tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+
+ branches = []
+
+ if tmp:
+ tmpline = tmp.split("\n")
+ for line in tmpline:
+            if len(line)==0:
+                break
+            if not line.endswith("base"):
+                continue
+ idx = line.find("refs/heads/")
+ kbranch = line[idx + len("refs/heads/"):]
+ if kbranch.find("/") == -1 and kbranch.find("base") == -1:
+ continue
+ idx = kbranch.find("base")
+ branches.append(kbranch[:idx - 1])
+
+ return branches
+
+
+def all_branches(context):
+ """
+ Return a list of all the branches found in the kernel git repo.
+ """
+ giturl = find_giturl(context)
+
+ print "Getting branches from remote repo %s..." % giturl
+
+ gitcmd = "git ls-remote %s *heads* 2>&1" % (giturl)
+ tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+
+ branches = []
+
+ base_prefixes = None
+
+ try:
+ branches_base = context["branches_base"]
+ if branches_base:
+ base_prefixes = branches_base.split(":")
+ except KeyError:
+ pass
+
+ arch = context["arch"]
+
+ if tmp:
+ tmpline = tmp.split("\n")
+ for line in tmpline:
+            if len(line)==0:
+                break
+ idx = line.find("refs/heads/")
+ kbranch = line[idx + len("refs/heads/"):]
+ kbranch_prefix = kbranch.rsplit("/", 1)[0]
+
+ if base_prefixes:
+ for base_prefix in base_prefixes:
+ if kbranch_prefix == base_prefix:
+ branches.append(kbranch)
+ continue
+
+ if (kbranch.find("/") != -1 and
+ (kbranch.find("standard") != -1 or kbranch.find("base") != -1) or
+ kbranch == "base"):
+ branches.append(kbranch)
+ continue
+
+ return branches
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/.gitignore b/scripts/lib/bsp/substrate/target/arch/arm/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/.gitignore
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf
new file mode 100644
index 0000000..588367a
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf
@@ -0,0 +1,102 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+{{ if xserver == "y": }}
+PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
+XSERVER ?= "xserver-xorg \
+ xf86-input-evdev \
+ xf86-input-mouse \
+ xf86-video-fbdev \
+ xf86-input-keyboard"
+
+MACHINE_EXTRA_RRECOMMENDS = " kernel-modules kernel-devicetree"
+
+EXTRA_IMAGEDEPENDS += "u-boot"
+
+{{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_cortexa8" }}
+{{ input type:"choice" val:"tune_arm1136jf_s" msg:"arm1136jf-s tuning optimizations" }}
+{{ input type:"choice" val:"tune_arm920t" msg:"arm920t tuning optimizations" }}
+{{ input type:"choice" val:"tune_arm926ejs" msg:"arm926ejs tuning optimizations" }}
+{{ input type:"choice" val:"tune_arm9tdmi" msg:"arm9tdmi tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexa5" msg:"cortexa5 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexa7" msg:"cortexa7 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexa8" msg:"cortexa8 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexa9" msg:"cortexa9 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexa15" msg:"cortexa15 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexm1" msg:"cortexm1 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexm3" msg:"cortexm3 tuning optimizations" }}
+{{ input type:"choice" val:"tune_cortexr4" msg:"cortexr4 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ep9312" msg:"ep9312 tuning optimizations" }}
+{{ input type:"choice" val:"tune_iwmmxt" msg:"iwmmxt tuning optimizations" }}
+{{ input type:"choice" val:"tune_strongarm1100" msg:"strongarm1100 tuning optimizations" }}
+{{ input type:"choice" val:"tune_xscale" msg:"xscale tuning optimizations" }}
+{{ if tunefile == "tune_arm1136jf_s": }}
+include conf/machine/include/tune-arm1136jf-s.inc
+{{ if tunefile == "tune_arm920t": }}
+include conf/machine/include/tune-arm920t.inc
+{{ if tunefile == "tune_arm926ejs": }}
+include conf/machine/include/tune-arm926ejs.inc
+{{ if tunefile == "tune_arm9tdmi": }}
+include conf/machine/include/tune-arm9tdmi.inc
+{{ if tunefile == "tune_cortexa5": }}
+include conf/machine/include/tune-cortexa5.inc
+{{ if tunefile == "tune_cortexa7": }}
+include conf/machine/include/tune-cortexa7.inc
+{{ if tunefile == "tune_cortexa8": }}
+DEFAULTTUNE ?= "cortexa8hf-neon"
+include conf/machine/include/tune-cortexa8.inc
+{{ if tunefile == "tune_cortexa9": }}
+include conf/machine/include/tune-cortexa9.inc
+{{ if tunefile == "tune_cortexa15": }}
+include conf/machine/include/tune-cortexa15.inc
+{{ if tunefile == "tune_cortexm1": }}
+include conf/machine/include/tune-cortexm1.inc
+{{ if tunefile == "tune_cortexm3": }}
+include conf/machine/include/tune-cortexm3.inc
+{{ if tunefile == "tune_cortexr4": }}
+include conf/machine/include/tune-cortexr4.inc
+{{ if tunefile == "tune_ep9312": }}
+include conf/machine/include/tune-ep9312.inc
+{{ if tunefile == "tune_iwmmxt": }}
+include conf/machine/include/tune-iwmmxt.inc
+{{ if tunefile == "tune_strongarm1100": }}
+include conf/machine/include/tune-strongarm1100.inc
+{{ if tunefile == "tune_xscale": }}
+include conf/machine/include/tune-xscale.inc
+
+IMAGE_FSTYPES += "tar.bz2 jffs2"
+EXTRA_IMAGECMD_jffs2 = "-lnp "
+
+SERIAL_CONSOLE = "115200 ttyO0"
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+KERNEL_IMAGETYPE = "uImage"
+KERNEL_DEVICETREE = "am335x-bone.dtb am335x-boneblack.dtb"
+KERNEL_EXTRA_ARGS += "LOADADDR=${UBOOT_ENTRYPOINT}"
+
+SPL_BINARY = "MLO"
+UBOOT_SUFFIX = "img"
+{{ input type:"edit" name:"uboot_machine" prio:"40" msg:"Please specify a value for UBOOT_MACHINE:" default:"am335x_evm_config" }}
+UBOOT_MACHINE = "{{=uboot_machine}}"
+{{ input type:"edit" name:"uboot_entrypoint" prio:"40" msg:"Please specify a value for UBOOT_ENTRYPOINT:" default:"0x80008000" }}
+UBOOT_ENTRYPOINT = "{{=uboot_entrypoint}}"
+{{ input type:"edit" name:"uboot_loadaddress" prio:"40" msg:"Please specify a value for UBOOT_LOADADDRESS:" default:"0x80008000" }}
+UBOOT_LOADADDRESS = "{{=uboot_loadaddress}}"
+
+MACHINE_FEATURES = "usbgadget usbhost vfat alsa"
+
+IMAGE_BOOT_FILES ?= "u-boot.${UBOOT_SUFFIX} MLO"
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
new file mode 100644
index 0000000..b442d02
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=machine}}
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
new file mode 100644
index 0000000..bc52893
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if xserver == "y": }} this
+Section "Module"
+ Load "extmod"
+ Load "dbe"
+ Load "glx"
+ Load "freetype"
+ Load "type1"
+ Load "record"
+ Load "dri"
+EndSection
+
+Section "Monitor"
+ Identifier "Builtin Default Monitor"
+EndSection
+
+Section "Device"
+ Identifier "Builtin Default fbdev Device 0"
+ Driver "omapfb"
+EndSection
+
+Section "Screen"
+ Identifier "Builtin Default fbdev Screen 0"
+ Device "Builtin Default fbdev Device 0"
+ Monitor "Builtin Default Monitor"
+EndSection
+
+Section "ServerLayout"
+ Identifier "Builtin Default Layout"
+ Screen "Builtin Default fbdev Screen 0"
+EndSection
+
+Section "ServerFlags"
+ Option "DontZap" "0"
+EndSection
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
new file mode 100644
index 0000000..3083003
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
@@ -0,0 +1,2 @@
+# yocto-bsp-filename {{ if xserver == "y": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..1e0d92c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-non_hardware.cfg b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-non_hardware.cfg
new file mode 100644
index 0000000..9bfc90c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-non_hardware.cfg
@@ -0,0 +1,31 @@
+# yocto-bsp-filename {{=machine}}-non_hardware.cfg
+#
+# Miscellaneous filesystems
+#
+CONFIG_NFS_DEF_FILE_IO_SIZE=1024
+
+#
+# Multiple Device Support
+#
+# CONFIG_MD is not set
+
+#
+# Kernel Features
+#
+CONFIG_NO_HZ=y
+
+#
+# CPUIdle
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_FS=y
+
+#
+# Power management options
+#
+CONFIG_PM_DEBUG=y
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..ea6966c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,15 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH arm
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
+
+# default policy for preempt-rt kernels
+include features/latencytop/latencytop.scc
+include features/profiling/profiling.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..405972d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,15 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH arm
+
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
+
+# default policy for standard kernels
+include features/latencytop/latencytop.scc
+include features/profiling/profiling.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..921b7e7
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH arm
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..47489e4
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..582759e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..97f747f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..a2e1ae0
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1,321 @@
+# yocto-bsp-filename {{=machine}}.cfg
+#
+# System Type
+#
+CONFIG_ARCH_OMAP=y
+
+#
+# TI OMAP Implementations
+#
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+
+#
+# TI OMAP Common Features
+#
+CONFIG_ARCH_OMAP2PLUS=y
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_SMARTREFLEX=y
+CONFIG_OMAP_SMARTREFLEX_CLASS3=y
+CONFIG_OMAP_MBOX_FWK=m
+CONFIG_OMAP_MBOX_KFIFO_SIZE=256
+
+#
+# OMAP Board Type
+#
+CONFIG_MACH_OMAP3_BEAGLE=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMBEE=y
+CONFIG_ARM_ERRATA_430973=y
+
+#
+# Kernel Features
+#
+CONFIG_LEDS=y
+
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+CONFIG_NEON=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_RUNTIME=y
+
+#
+# Generic Driver Options
+#
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+
+#
+# Disk-On-Chip Device Drivers
+#
+CONFIG_MTD_NAND=y
+
+CONFIG_MTD_NAND_OMAP2=y
+
+CONFIG_MTD_UBI=y
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_SMSC911X=y
+CONFIG_USB_NET_SMSC95XX=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_EVDEV=y
+
+#
+# Input Device Drivers
+#
+CONFIG_KEYBOARD_TWL4030=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_I2C=y
+CONFIG_I2C_OMAP=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_OMAP24XX=y
+
+#
+# I2C GPIO expanders:
+#
+CONFIG_GPIO_TWL4030=y
+
+#
+# SPI GPIO expanders:
+#
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Multifunction device drivers
+#
+CONFIG_TWL4030_CORE=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DUMMY=y
+CONFIG_REGULATOR_TWL4030=y
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_DRM=m
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=14
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+CONFIG_OMAP2_DSS_DPI=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+CONFIG_OMAP2_DSS_DSI=y
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=2
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC_DPI=y
+CONFIG_PANEL_DVI=y
+CONFIG_PANEL_SHARP_LS037V7DW01=y
+# CONFIG_PANEL_LGPHILIPS_LB035Q02 is not set
+# CONFIG_PANEL_TAAL is not set
+CONFIG_PANEL_TPO_TD043MTEA1=m
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+
+#
+# Console display driver support
+#
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_VGA_CONSOLE is not set
+
+# DMA Devices
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
+CONFIG_DMA_OF=y
+
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_OMAP_TWL4030=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB=y
+CONFIG_USB_SUPPORT=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_OMAP2PLUS=y
+CONFIG_USB_OMAP=y
+
+#
+# OMAP 343x high speed USB support
+#
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+CONFIG_USB_INVENTRA_DMA=y
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_OTG_UTILS=y
+CONFIG_TWL4030_USB=y
+
+# USB gadget modules
+CONFIG_USB_G_NCM=y
+CONFIG_USB_MASS_STORAGE=y
+
+CONFIG_MMC=y
+
+#
+# MMC/SD Host Controller Drivers
+#
+CONFIG_MMC_OMAP_HS=y
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TWL4030=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_VFAT_FS=y
+
+#
+# Multimedia core support
+#
+
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+
+#
+# Advanced Power Management Emulation support
+#
+CONFIG_APM_EMULATION=y
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..828400d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,8 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+kconf non-hardware {{=machine}}-non_hardware.cfg
+
+include features/usb-net/usb-net.scc
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..c336007
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,25 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..0a47a4e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..815c77b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..2d3d073
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..b88a06c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT b/scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT
new file mode 100644
index 0000000..fb950dc
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/COPYING.MIT
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/scripts/lib/bsp/substrate/target/arch/common/README b/scripts/lib/bsp/substrate/target/arch/common/README
new file mode 100644
index 0000000..928659f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/README
@@ -0,0 +1,118 @@
+This README file contains information on building the meta-{{=machine}}
+BSP layer, and booting the images contained in the /binary directory.
+Please see the corresponding sections below for details.
+
+
+Dependencies
+============
+
+This layer depends on:
+
+ URI: git://git.openembedded.org/bitbake
+ branch: master
+
+ URI: git://git.openembedded.org/openembedded-core
+ layers: meta
+ branch: master
+
+ URI: git://git.yoctoproject.org/xxxx
+ layers: xxxx
+ branch: master
+
+
+Patches
+=======
+
+Please submit any patches against this BSP to the Yocto mailing list
+(yocto@yoctoproject.org) and cc: the maintainer:
+
+Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com>
+
+Please see the meta-xxxx/MAINTAINERS file for more details.
+
+
+Table of Contents
+=================
+
+ I. Building the meta-{{=machine}} BSP layer
+ II. Booting the images in /binary
+
+
+I. Building the meta-{{=machine}} BSP layer
+========================================
+
+--- replace with specific instructions for your layer ---
+
+In order to build an image with BSP support for a given release, you
+need to download the corresponding BSP tarball from the 'Board Support
+Package (BSP) Downloads' page of the Yocto Project website.
+
+Having done that, and assuming you extracted the BSP tarball contents
+at the top-level of your yocto build tree, you can build a
+{{=machine}} image by adding the location of the meta-{{=machine}}
+layer to bblayers.conf, along with any other layers needed (to access
+common metadata shared between BSPs), e.g.:
+
+ yocto/meta-xxxx \
+ yocto/meta-xxxx/meta-{{=machine}} \
+
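+In a poky-based build, the resulting BBLAYERS entry in
+conf/bblayers.conf might then look something like the following
+(treat these paths as placeholders for wherever you actually checked
+things out):
+
+  BBLAYERS ?= " \
+    /path/to/yocto/meta \
+    /path/to/yocto/meta-xxxx \
+    /path/to/yocto/meta-xxxx/meta-{{=machine}} \
+    "
+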
+To enable the {{=machine}} layer, add the {{=machine}} MACHINE to local.conf:
+
+ MACHINE ?= "{{=machine}}"
+
+You should then be able to build a {{=machine}} image as follows:
+
+ $ source oe-init-build-env
+ $ bitbake core-image-sato
+
+At the end of a successful build, you should have a live image that
+you can boot from a USB flash drive (see instructions on how to do
+that below, in the section 'Booting the images in /binary').
+
+As an alternative to downloading the BSP tarball, you can also work
+directly from the meta-xxxx git repository. For each BSP in the
+'meta-xxxx' repository, there are multiple branches, one corresponding
+to each major release starting with 'laverne' (0.90), in addition to
+the latest code which tracks the current master (note that not all
+BSPs are present in every release). Instead of extracting a BSP
+tarball at the top level of your yocto build tree, you can
+equivalently check out the appropriate branch from the meta-xxxx
+repository at the same location.
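+
+For example (substituting the real repository name for 'meta-xxxx'
+and the branch for the release you're targeting - both are
+placeholders here):
+
+  $ git clone git://git.yoctoproject.org/meta-xxxx
+  $ cd meta-xxxx
+  $ git checkout <release-branch>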
+
+
+II. Booting the images in /binary
+=================================
+
+--- replace with specific instructions for your platform ---
+
+This BSP contains bootable live images, which can be used to directly
+boot Yocto off of a USB flash drive.
+
+Under Linux, insert a USB flash drive. Assuming the USB flash drive
+takes device /dev/sdf, use dd to copy the live image to it. For
+example:
+
+# dd if=core-image-sato-{{=machine}}-20101207053738.hddimg of=/dev/sdf
+# sync
+# eject /dev/sdf
+
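+If you're not sure which device node your USB flash drive was
+assigned, double-check before running dd, since dd will overwrite
+whatever device you point it at. For example (just one way to check,
+any method you trust is fine):
+
+# lsblk
+# dmesg | tail
+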
+This should give you a bootable USB flash device. Insert the device
+into a bootable USB socket on the target, and power on. This should
+result in a system booted to the Sato graphical desktop.
+
+If you want a terminal, use the arrows at the top of the UI to move to
+different pages of available applications, one of which is named
+'Terminal'. Clicking that should give you a root terminal.
+
+If you want to ssh into the system, you can use the root terminal to
+run ifconfig, note the IP address it reports, and then ssh in from
+your host using that address. The root password is empty, so to log
+in just type 'root' for the user name and hit 'Enter' at the password
+prompt.
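+
+For example (the interface name and address below are placeholders -
+use whatever ifconfig reports on your target):
+
+# ifconfig eth0
+$ ssh root@<target-ip-address>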
+
+----
+
+If you find you're getting corrupt images on the USB (it doesn't show
+the syslinux boot: prompt, or the boot: prompt contains strange
+characters), try doing this first:
+
+# dd if=/dev/zero of=/dev/sdf bs=1M count=512
diff --git a/scripts/lib/bsp/substrate/target/arch/common/README.sources b/scripts/lib/bsp/substrate/target/arch/common/README.sources
new file mode 100644
index 0000000..3c4cb7b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/README.sources
@@ -0,0 +1,17 @@
+The sources for the packages comprising the images shipped with this
+BSP can be found at the following location:
+
+http://downloads.yoctoproject.org/mirror/sources/
+
+The metadata used to generate the images shipped with this BSP, in
+addition to the code contained in this BSP, can be found at the
+following location:
+
+http://www.yoctoproject.org/downloads/yocto-1.1/poky-edison-6.0.tar.bz2
+
+The metadata used to generate the images shipped with this BSP, in
+addition to the code contained in this BSP, can also be found at the
+following locations:
+
+git://git.yoctoproject.org/poky.git
+git://git.yoctoproject.org/meta-xxxx
diff --git a/scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore b/scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/binary/.gitignore
diff --git a/scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf b/scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf
new file mode 100644
index 0000000..5529f45
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/conf/layer.conf
@@ -0,0 +1,10 @@
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have recipes-* directories, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+ ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "{{=machine}}"
+BBFILE_PATTERN_{{=machine}} = "^${LAYERDIR}/"
+BBFILE_PRIORITY_{{=machine}} = "6"
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine.noinstall b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine.noinstall
new file mode 100644
index 0000000..b442d02
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=machine}}
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine/machconfig b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine/machconfig
new file mode 100644
index 0000000..3b85d38
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor/machine/machconfig
@@ -0,0 +1,5 @@
+# Assume a USB mouse and keyboard are connected
+{{ input type:"boolean" name:"touchscreen" msg:"Does your BSP have a touchscreen? (y/n)" default:"n" }}
+HAVE_TOUCHSCREEN={{=touchscreen}}
+{{ input type:"boolean" name:"keyboard" msg:"Does your BSP have a keyboard? (y/n)" default:"y" }}
+HAVE_KEYBOARD={{=keyboard}}
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend
new file mode 100644
index 0000000..6d4804d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-bsp/formfactor/formfactor_0.0.bbappend
@@ -0,0 +1,2 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..663dddb
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,26 @@
+{{ if kernel_choice == "custom": }}
+{{ input type:"boolean" name:"custom_kernel_remote" prio:"20" msg:"Is the custom kernel you'd like to use in a remote git repo? (y/n)" default:"y"}}
+
+{{ if kernel_choice == "custom" and custom_kernel_remote == "y": }}
+{{ input type:"edit-git-repo" name:"custom_kernel_remote_path" prio:"20" msg:"Please enter the full URI to the remote git repo (the default corresponds to linux-stable v3.16.3)" default:"git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"}}
+
+{{ if kernel_choice == "custom" and custom_kernel_remote == "n": }}
+{{ input type:"edit-git-repo" name:"custom_kernel_local_path" prio:"20" msg:"You've indicated that you're not using a remote git repo. Please enter the full path to the local git repo you want to use (the default assumes a local linux-stable v3.16.3)" default:"/home/trz/yocto/kernels/linux-stable.git"}}
+
+{{ if kernel_choice == "custom": }}
+{{ input type:"boolean" name:"custom_kernel_need_kbranch" prio:"20" msg:"Do you need to use a specific (non-master) branch? (y/n)" default:"n"}}
+
+{{ if kernel_choice == "custom" and custom_kernel_need_kbranch == "y": }}
+{{ input type:"edit" name:"custom_kernel_kbranch" prio:"20" msg:"Please enter the branch you want to use (the default branch corresponds to the linux-stable 'linux-3.16.y' branch):" default:"linux-3.16.y"}}
+
+{{ if kernel_choice == "custom": }}
+{{ input type:"edit" name:"custom_kernel_srcrev" prio:"20" msg:"Please enter the SRCREV (commit id) you'd like to use (use '${AUTOREV}' to track the current HEAD):" default:"${AUTOREV}"}}
+
+{{ if kernel_choice == "custom": }}
+{{ input type:"edit" name:"custom_kernel_linux_version" prio:"20" msg:"Please enter the Linux version of the kernel you've specified:" default:"3.16.3"}}
+
+{{ if kernel_choice == "custom": }}
+{{ input type:"edit" name:"custom_kernel_linux_version_extension" prio:"20" msg:"Please enter a Linux version extension if you want (it will show up at the end of the kernel name shown by uname):" default:"-custom"}}
+
+{{ if kernel_choice == "custom": }}
+{{ input type:"edit-file" name:"custom_kernel_defconfig" prio:"20" msg:"It's recommended (but not required) that custom kernels be built using a defconfig. Please enter the full path to the defconfig for your kernel (NOTE: if you don't specify a defconfig the kernel probably won't build or boot):" default:""}}
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.bb b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.bb
new file mode 100644
index 0000000..fda955b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.bb
@@ -0,0 +1,57 @@
+# yocto-bsp-filename {{ if kernel_choice == "custom": }} this
+# This file was derived from the linux-yocto-custom.bb recipe in
+# oe-core.
+#
+# linux-yocto-custom.bb:
+#
+# A yocto-bsp-generated kernel recipe that uses the linux-yocto and
+# oe-core kernel classes to apply a subset of yocto kernel
+# management to git managed kernel repositories.
+#
+# Warning:
+#
+# Building this kernel without providing a defconfig or BSP
+# configuration will result in build or boot errors. This is not a
+# bug.
+#
+# Notes:
+#
+# patches: patches can be merged into the source git tree itself,
+# added via the SRC_URI, or controlled via a BSP
+# configuration.
+#
+# example configuration addition:
+# SRC_URI += "file://smp.cfg"
+# example patch addition:
+# SRC_URI += "file://0001-linux-version-tweak.patch
+# example feature addition:
+# SRC_URI += "file://feature.scc"
+#
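+# For reference, a .cfg fragment such as the smp.cfg mentioned above
+# is just a list of kernel config options, e.g. (illustrative only):
+#
+#   CONFIG_SMP=y
+#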
+
+inherit kernel
+require recipes-kernel/linux/linux-yocto.inc
+
+{{ if kernel_choice == "custom" and custom_kernel_remote == "y": }}
+SRC_URI = "{{=custom_kernel_remote_path}};protocol=git;bareclone=1;branch=${KBRANCH}"
+{{ if kernel_choice == "custom" and custom_kernel_remote == "n": }}
+SRC_URI = "git://{{=custom_kernel_local_path}};protocol=file;bareclone=1;branch=${KBRANCH}"
+
+SRC_URI += "file://defconfig"
+
+SRC_URI += "file://{{=machine}}.scc \
+ file://{{=machine}}.cfg \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ "
+
+{{ if kernel_choice == "custom" and custom_kernel_need_kbranch == "y" and custom_kernel_kbranch and custom_kernel_kbranch != "master": }}
+KBRANCH = "{{=custom_kernel_kbranch}}"
+
+LINUX_VERSION ?= "{{=custom_kernel_linux_version}}"
+LINUX_VERSION_EXTENSION ?= "{{=custom_kernel_linux_version_extension}}"
+
+SRCREV = "{{=custom_kernel_srcrev}}"
+
+PV = "${LINUX_VERSION}+git${SRCPV}"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.noinstall b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.noinstall
new file mode 100644
index 0000000..017d206
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice == "custom": }} linux-yocto-custom
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/defconfig b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/defconfig
new file mode 100644
index 0000000..ceb0ffa
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/defconfig
@@ -0,0 +1,5 @@
+#
+# Placeholder for custom default kernel configuration. yocto-bsp will
+# replace this file with a user-specified defconfig.
+#
+{{ if custom_kernel_defconfig: replace_file(of, custom_kernel_defconfig) }}
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-config.cfg
new file mode 100644
index 0000000..922309d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-config.cfg
@@ -0,0 +1,9 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
+#
+# Used by yocto-kernel to manage config options.
+#
+# yocto-kernel may change the contents of this file in any
+# way it sees fit, including removing comments like this,
+# so don't manually make any modifications you don't want
+# to lose.
+#
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-patches.scc
new file mode 100644
index 0000000..6d1138f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine-user-patches.scc
@@ -0,0 +1,9 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
+#
+# Used by yocto-kernel to manage patches.
+#
+# yocto-kernel may change the contents of this file in any
+# way it sees fit, including removing comments like this,
+# so don't manually make any modifications you don't want
+# to lose.
+#
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.cfg b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.cfg
new file mode 100644
index 0000000..1ba8201
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.cfg
@@ -0,0 +1,4 @@
+# yocto-bsp-filename {{=machine}}.cfg
+#
+# A convenient place to add config options, nothing more.
+#
diff --git a/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.scc b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.scc
new file mode 100644
index 0000000..0b6b413
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/common/recipes-kernel/linux/linux-yocto-custom/machine.scc
@@ -0,0 +1,18 @@
+# yocto-bsp-filename {{=machine}}.scc
+#
+# The top-level 'feature' for the {{=machine}} custom kernel.
+#
+# Essentially this is a convenient top-level container or starting
+# point for adding lower-level config fragments and features.
+#
+
+# {{=machine}}.cfg in the linux-yocto-custom subdir is just a
+# convenient place for adding random config fragments.
+
+kconf hardware {{=machine}}.cfg
+
+# These are used by yocto-kernel to add config fragments and features.
+# Don't remove if you plan on using yocto-kernel with this BSP.
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf
new file mode 100644
index 0000000..d5abe4f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf
@@ -0,0 +1,77 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+{{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_core2" }}
+{{ input type:"choice" val:"tune_i586" msg:"i586 tuning optimizations" }}
+{{ input type:"choice" val:"tune_atom" msg:"Atom tuning optimizations" }}
+{{ input type:"choice" val:"tune_core2" msg:"Core2 tuning optimizations" }}
+{{ if tunefile == "tune_i586": }}
+require conf/machine/include/tune-i586.inc
+{{ if tunefile == "tune_atom": }}
+require conf/machine/include/tune-atom.inc
+{{ if tunefile == "tune_core2": }}
+DEFAULTTUNE="core2-32"
+require conf/machine/include/tune-core2.inc
+
+require conf/machine/include/x86-base.inc
+
+MACHINE_FEATURES += "wifi efi pcbios"
+
+{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+
+{{ if xserver == "y" and (kernel_choice == "linux-yocto_4.4" or kernel_choice == "linux-yocto_4.1"): }}
+{{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_vesa" }}
+{{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }}
+{{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }}
+{{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }}
+{{ input type:"choice" val:"xserver_fbdev" msg:"fbdev xserver support" }}
+{{ input type:"choice" val:"xserver_modesetting" msg:"modesetting xserver support" }}
+
+{{ if xserver == "y" and kernel_choice == "custom": }}
+{{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_vesa" }}
+{{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }}
+{{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }}
+{{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }}
+{{ input type:"choice" val:"xserver_fbdev" msg:"fbdev xserver support" }}
+{{ input type:"choice" val:"xserver_modesetting" msg:"modesetting xserver support" }}
+
+{{ if xserver == "y" and kernel_choice != "linux-yocto_4.4" and kernel_choice != "linux-yocto_4.1" and kernel_choice != "custom": xserver_choice = "xserver_i915" }}
+
+{{ if xserver == "y": }}
+XSERVER ?= "${XSERVER_X86_BASE} \
+ ${XSERVER_X86_EXT} \
+{{ if xserver == "y" and xserver_choice == "xserver_vesa": }}
+ ${XSERVER_X86_VESA} \
+{{ if xserver == "y" and xserver_choice == "xserver_i915": }}
+ ${XSERVER_X86_I915} \
+{{ if xserver == "y" and xserver_choice == "xserver_i965": }}
+ ${XSERVER_X86_I965} \
+{{ if xserver == "y" and xserver_choice == "xserver_fbdev": }}
+ ${XSERVER_X86_FBDEV} \
+{{ if xserver == "y" and xserver_choice == "xserver_modesetting": }}
+ ${XSERVER_X86_MODESETTING} \
+{{ if xserver == "y": }}
+ "
+
+MACHINE_EXTRA_RRECOMMENDS += "linux-firmware v86d eee-acpi-scripts"
+
+EXTRA_OECONF_append_pn-matchbox-panel-2 = " --with-battery=acpi"
+
+GLIBC_ADDONS = "nptl"
+
+{{ if xserver == "y" and xserver_choice == "xserver_vesa": }}
+APPEND += "video=vesafb vga=0x318"
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
new file mode 100644
index 0000000..b442d02
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=machine}}
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
new file mode 100644
index 0000000..ac9a0f1
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
@@ -0,0 +1 @@
+# yocto-bsp-filename {{ if xserver == "y": }} this
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
new file mode 100644
index 0000000..3083003
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
@@ -0,0 +1,2 @@
+# yocto-bsp-filename {{ if xserver == "y": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..1e0d92c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..7146e23
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,17 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH i386
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
+
+# default policy for preempt-rt kernels
+include cfg/usb-mass-storage.scc
+include cfg/boot-live.scc
+include features/latencytop/latencytop.scc
+include features/profiling/profiling.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..67a54be
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,17 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH i386
+
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
+
+# default policy for standard kernels
+include cfg/usb-mass-storage.scc
+include cfg/boot-live.scc
+include features/latencytop/latencytop.scc
+include features/profiling/profiling.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..91373b3
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH i386
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..69efdcc
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..85be26d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..4c59daa
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..3b168b7
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1,55 @@
+# yocto-bsp-filename {{=machine}}.cfg
+CONFIG_X86_32=y
+CONFIG_MATOM=y
+CONFIG_PRINTK=y
+
+# Basic hardware support for the box - network, USB, PCI, sound
+CONFIG_NETDEVICES=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_ATA_SFF=y
+CONFIG_PCI=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_R8169=y
+CONFIG_PATA_SCH=y
+CONFIG_MMC_SDHCI_PCI=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_NET=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SATA_AHCI=y
+CONFIG_AGP=y
+CONFIG_PM=y
+CONFIG_ACPI=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_INPUT=y
+
+# Make sure these are on, otherwise the bootup won't be fun
+CONFIG_EXT3_FS=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_MODULES=y
+CONFIG_SHMEM=y
+CONFIG_TMPFS=y
+CONFIG_PACKET=y
+
+# Needed for booting (and using) USB memory sticks
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+
+CONFIG_RD_GZIP=y
+
+# Needed for booting (and using) CD images
+CONFIG_BLK_DEV_SR=y
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..3d32f11
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,21 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+
+include features/intel-e1xxxx/intel-e100.scc
+include features/intel-e1xxxx/intel-e1xxxx.scc
+
+{{ if xserver == "y" and (xserver_choice == "xserver_i915" or xserver_choice == "xserver_i965"): }}
+include features/i915/i915.scc
+
+include features/serial/8250.scc
+include features/ericsson-3g/f5521gw.scc
+
+{{ if xserver == "y" and xserver_choice == "xserver_vesa": }}
+include cfg/vesafb.scc
+
+include cfg/usb-mass-storage.scc
+include cfg/boot-live.scc
+include features/power/intel.scc
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..c336007
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,25 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..0a47a4e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..815c77b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..aecdff0
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..dd4de31
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT b/scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT
new file mode 100644
index 0000000..89de354
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/COPYING.MIT
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/README b/scripts/lib/bsp/substrate/target/arch/layer/README
new file mode 100644
index 0000000..ca6527c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/README
@@ -0,0 +1,64 @@
+This README file contains information on the contents of the
+{{=layer_name}} layer.
+
+Please see the corresponding sections below for details.
+
+
+Dependencies
+============
+
+This layer depends on:
+
+ URI: git://git.openembedded.org/bitbake
+ branch: master
+
+ URI: git://git.openembedded.org/openembedded-core
+ layers: meta
+ branch: master
+
+ URI: git://git.yoctoproject.org/xxxx
+ layers: xxxx
+ branch: master
+
+
+Patches
+=======
+
+Please submit any patches against the {{=layer_name}} layer to the
+xxxx mailing list (xxxx@zzzz.org) and cc: the maintainer:
+
+Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com>
+
+
+Table of Contents
+=================
+
+ I. Adding the {{=layer_name}} layer to your build
+ II. Misc
+
+
+I. Adding the {{=layer_name}} layer to your build
+=================================================
+
+--- replace with specific instructions for the {{=layer_name}} layer ---
+
+In order to use this layer, you need to make the build system aware of
+it.
+
+Assuming the {{=layer_name}} layer exists at the top-level of your
+yocto build tree, you can add it to the build system by adding the
+location of the {{=layer_name}} layer to bblayers.conf, along with any
+other layers needed, e.g.:
+
+ BBLAYERS ?= " \
+ /path/to/yocto/meta \
+ /path/to/yocto/meta-poky \
+ /path/to/yocto/meta-yocto-bsp \
+ /path/to/yocto/meta-{{=layer_name}} \
+ "
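+
+Alternatively, recent versions of the build system can add the layer
+for you (assuming you've sourced oe-init-build-env and adjust the
+path to match your checkout):
+
+  $ bitbake-layers add-layer /path/to/yocto/meta-{{=layer_name}}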
+
+
+II. Misc
+========
+
+--- replace with specific information about the {{=layer_name}} layer ---
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf b/scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf
new file mode 100644
index 0000000..bdffe17
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/conf/layer.conf
@@ -0,0 +1,10 @@
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have recipes-* directories, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+ ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "{{=layer_name}}"
+BBFILE_PATTERN_{{=layer_name}} = "^${LAYERDIR}/"
+BBFILE_PRIORITY_{{=layer_name}} = "{{=layer_priority}}"
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall b/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
new file mode 100644
index 0000000..e2a89c3
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/layer-questions.noinstall
@@ -0,0 +1,14 @@
+{{ input type:"edit" name:"layer_priority" prio:"20" msg:"Please enter the layer priority you'd like to use for the layer:" default:"6"}}
+
+{{ input type:"boolean" name:"create_example_recipe" prio:"20" msg:"Would you like to have an example recipe created? (y/n)" default:"n"}}
+
+{{ if create_example_recipe == "y": }}
+{{ input type:"edit" name:"example_recipe_name" prio:"20" msg:"Please enter the name you'd like to use for your example recipe:" default:"example"}}
+
+{{ input type:"boolean" name:"create_example_bbappend" prio:"20" msg:"Would you like to have an example bbappend file created? (y/n)" default:"n"}}
+
+{{ if create_example_bbappend == "y": }}
+{{ input type:"edit" name:"example_bbappend_name" prio:"20" msg:"Please enter the name you'd like to use for your bbappend file:" default:"example"}}
+
+{{ if create_example_bbappend == "y": }}
+{{ input type:"edit" name:"example_bbappend_version" prio:"20" msg:"Please enter the version number you'd like to use for your bbappend file (this should match the recipe you're appending to):" default:"0.1"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend.noinstall b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend.noinstall
new file mode 100644
index 0000000..3594e65
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if create_example_bbappend == "y": }} recipes-example-bbappend
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.bbappend b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.bbappend
new file mode 100644
index 0000000..3531330
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.bbappend
@@ -0,0 +1,9 @@
+# yocto-bsp-filename {{=example_bbappend_name}}_{{=example_bbappend_version}}.bbappend
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}:"
+
+#
+# This .bbappend doesn't yet do anything - replace this text with
+# modifications to the example_0.1.bb recipe, or whatever recipe it is
+# that you want to modify with this .bbappend (make sure you change
+# the recipe name (PN) and version (PV) to match).
+#
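+
+# For example, to apply the placeholder example.patch shipped
+# alongside this .bbappend (purely illustrative - adjust the file and
+# recipe to your actual needs):
+#
+# SRC_URI += "file://example.patch"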
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.noinstall b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.noinstall
new file mode 100644
index 0000000..46df8a8
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=example_bbappend_name}}-{{=example_bbappend_version}}
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version/example.patch b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version/example.patch
new file mode 100644
index 0000000..2000a34
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example-bbappend/example-bbappend/example-bbappend-version/example.patch
@@ -0,0 +1,12 @@
+#
+# This is a non-functional placeholder file, here for example purposes
+# only.
+#
+# If you had a patch for your recipe, you'd put it in this directory
+# and reference it from your recipe's SRC_URI:
+#
+# SRC_URI += "file://example.patch"
+#
+# Note that you could also rename the directory containing this patch
+# to remove the version number or simply rename it 'files'. Doing so
+# allows you to use the same directory for multiple recipes.
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example.noinstall b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example.noinstall
new file mode 100644
index 0000000..b0069b1
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if create_example_recipe == "y": }} recipes-example
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb
new file mode 100644
index 0000000..5fbf594
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb
@@ -0,0 +1,23 @@
+# yocto-bsp-filename {{=example_recipe_name}}_0.1.bb
+#
+# This file was derived from the 'Hello World!' example recipe in the
+# Yocto Project Development Manual.
+#
+
+SUMMARY = "Simple helloworld application"
+SECTION = "examples"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
+
+SRC_URI = "file://helloworld.c"
+
+S = "${WORKDIR}"
+
+do_compile() {
+	${CC} helloworld.c -o helloworld ${LDFLAGS}
+}
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 helloworld ${D}${bindir}
+}
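+
+# Once the layer containing this recipe has been added to BBLAYERS,
+# you should be able to build just this recipe with something like:
+#
+#   $ bitbake {{=example_recipe_name}}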
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.noinstall b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.noinstall
new file mode 100644
index 0000000..c319c19
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=example_recipe_name}}-0.1
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/example.patch b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/example.patch
new file mode 100644
index 0000000..2000a34
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/example.patch
@@ -0,0 +1,12 @@
+#
+# This is a non-functional placeholder file, here for example purposes
+# only.
+#
+# If you had a patch for your recipe, you'd put it in this directory
+# and reference it from your recipe's SRC_URI:
+#
+# SRC_URI += "file://example.patch"
+#
+# Note that you could also rename the directory containing this patch
+# to remove the version number or simply rename it 'files'. Doing so
+# allows you to use the same directory for multiple recipes.
diff --git a/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/helloworld.c b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/helloworld.c
new file mode 100644
index 0000000..71f2e46
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1/helloworld.c
@@ -0,0 +1,8 @@
+#include <stdio.h>
+
+int main(int argc, char **argv)
+{
+ printf("Hello World!\n");
+
+ return 0;
+}
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/.gitignore b/scripts/lib/bsp/substrate/target/arch/mips/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/.gitignore
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf
new file mode 100644
index 0000000..b319d62
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf
@@ -0,0 +1,39 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+require conf/machine/include/tune-mips32.inc
+
+MACHINE_FEATURES = "screen keyboard pci usbhost ext2 ext3 serial"
+
+KERNEL_IMAGETYPE = "vmlinux"
+KERNEL_ALT_IMAGETYPE = "vmlinux.bin"
+KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment"
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+{{ if xserver == "y": }}
+PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
+XSERVER ?= "xserver-xorg \
+ xf86-input-evdev \
+ xf86-video-fbdev"
+
+SERIAL_CONSOLE = "115200 ttyS0"
+USE_VT ?= "0"
+
+MACHINE_EXTRA_RRECOMMENDS = " kernel-modules"
+
+IMAGE_FSTYPES ?= "jffs2 tar.bz2"
+JFFS2_ERASEBLOCK = "0x10000"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..1e0d92c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..a128255
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH mips
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..7c9dc52
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH mips
+
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..64f395b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH mips
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..47489e4
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..85be26d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..97f747f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..2fe4766
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1,2 @@
+# yocto-bsp-filename {{=machine}}.cfg
+CONFIG_MIPS=y
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..f39dc3e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,8 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+
+include cfg/usb-mass-storage.scc
+include cfg/fs/vfat.scc
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..c336007
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,25 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..0a47a4e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..815c77b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..1e99a04
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..b88a06c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/.gitignore b/scripts/lib/bsp/substrate/target/arch/mips64/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/.gitignore
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf
new file mode 100644
index 0000000..3afc5e0
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf
@@ -0,0 +1,39 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+require conf/machine/include/tune-mips64.inc
+
+MACHINE_FEATURES = "pci ext2 ext3 serial"
+
+KERNEL_IMAGETYPE = "vmlinux"
+KERNEL_ALT_IMAGETYPE = "vmlinux.bin"
+KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment"
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+{{ if xserver == "y": }}
+PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
+XSERVER ?= "xserver-xorg \
+ xf86-input-evdev \
+ xf86-video-fbdev"
+
+SERIAL_CONSOLE = "115200 ttyS0"
+USE_VT ?= "0"
+
+MACHINE_EXTRA_RRECOMMENDS = " kernel-modules"
+
+IMAGE_FSTYPES ?= "jffs2 tar.bz2"
+JFFS2_ERASEBLOCK = "0x10000"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..1e0d92c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..a128255
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH mips
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..7c9dc52
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH mips
+
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..64f395b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH mips
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..69efdcc
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..85be26d
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..4c59daa
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..0cc906b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1,66 @@
+# yocto-bsp-filename {{=machine}}.cfg
+#SOC
+CONFIG_CAVIUM_OCTEON_SOC=y
+CONFIG_CAVIUM_CN63XXP1=y
+CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
+
+#Kernel
+CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+#Executable file formats
+CONFIG_MIPS32_COMPAT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+
+
+#PCI
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+
+#I2C
+CONFIG_I2C=y
+CONFIG_I2C_OCTEON=y
+
+CONFIG_HW_RANDOM_OCTEON=y
+
+#SPI
+CONFIG_SPI=y
+CONFIG_SPI_OCTEON=y
+
+#Misc
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_OCTEON_WDT=y
+
+CONFIG_STAGING=y
+
+#Ethernet
+CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_MGMT_ETHERNET=y
+CONFIG_MDIO_OCTEON=y
+
+#PHY
+CONFIG_MARVELL_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_BCM87XX_PHY=y
+
+
+#USB
+CONFIG_USB=y
+CONFIG_OCTEON_USB=y
+CONFIG_USB_OCTEON_EHCI=y
+CONFIG_USB_OCTEON_OHCI=y
+CONFIG_USB_OCTEON2_COMMON=y
+
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..f39dc3e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,8 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+
+include cfg/usb-mass-storage.scc
+include cfg/fs/vfat.scc
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..c336007
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,25 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..0a47a4e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..815c77b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..01a046c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..57c90fa
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore b/scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/.gitignore
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf
new file mode 100644
index 0000000..583c5e4
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf
@@ -0,0 +1,87 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+TARGET_FPU = ""
+
+{{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_ppce300c3" }}
+{{ input type:"choice" val:"tune_ppc476" msg:"ppc476 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppc603e" msg:"ppc603e tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppc7400" msg:"ppc7400 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce300c2" msg:"ppce300c2 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce300c3" msg:"ppce300c3 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce500" msg:"ppce500 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce500mc" msg:"ppce500mc tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce500v2" msg:"ppce500v2 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce5500" msg:"ppce5500 tuning optimizations" }}
+{{ input type:"choice" val:"tune_ppce6500" msg:"ppce6500 tuning optimizations" }}
+{{ input type:"choice" val:"tune_power5" msg:"power5 tuning optimizations" }}
+{{ input type:"choice" val:"tune_power6" msg:"power6 tuning optimizations" }}
+{{ input type:"choice" val:"tune_power7" msg:"power7 tuning optimizations" }}
+{{ if tunefile == "tune_ppc476": }}
+include conf/machine/include/tune-ppc476.inc
+{{ if tunefile == "tune_ppc603e": }}
+include conf/machine/include/tune-ppc603e.inc
+{{ if tunefile == "tune_ppc7400": }}
+include conf/machine/include/tune-ppc7400.inc
+{{ if tunefile == "tune_ppce300c2": }}
+include conf/machine/include/tune-ppce300c2.inc
+{{ if tunefile == "tune_ppce300c3": }}
+include conf/machine/include/tune-ppce300c3.inc
+{{ if tunefile == "tune_ppce500": }}
+include conf/machine/include/tune-ppce500.inc
+{{ if tunefile == "tune_ppce500mc": }}
+include conf/machine/include/tune-ppce500mc.inc
+{{ if tunefile == "tune_ppce500v2": }}
+include conf/machine/include/tune-ppce500v2.inc
+{{ if tunefile == "tune_ppce5500": }}
+include conf/machine/include/tune-ppce5500.inc
+{{ if tunefile == "tune_ppce6500": }}
+include conf/machine/include/tune-ppce6500.inc
+{{ if tunefile == "tune_power5": }}
+include conf/machine/include/tune-power5.inc
+{{ if tunefile == "tune_power6": }}
+include conf/machine/include/tune-power6.inc
+{{ if tunefile == "tune_power7": }}
+include conf/machine/include/tune-power7.inc
+
+KERNEL_IMAGETYPE = "uImage"
+
+EXTRA_IMAGEDEPENDS += "u-boot"
+UBOOT_MACHINE_{{=machine}} = "MPC8315ERDB_config"
+
+SERIAL_CONSOLE = "115200 ttyS0"
+
+MACHINE_FEATURES = "keyboard pci ext2 ext3 serial"
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+{{ if xserver == "y": }}
+PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
+XSERVER ?= "xserver-xorg \
+ xf86-input-evdev \
+ xf86-video-fbdev"
+
+PREFERRED_VERSION_u-boot ?= "v2016.01%"
+{{ input type:"edit" name:"uboot_entrypoint" prio:"40" msg:"Please specify a value for UBOOT_ENTRYPOINT:" default:"0x00000000" }}
+UBOOT_ENTRYPOINT = "{{=uboot_entrypoint}}"
+
+{{ input type:"edit" name:"kernel_devicetree" prio:"40" msg:"Please specify a [arch/powerpc/boot/dts/xxx] value for KERNEL_DEVICETREE:" default:"mpc8315erdb.dts" }}
+KERNEL_DEVICETREE = "${S}/arch/powerpc/boot/dts/{{=kernel_devicetree}}"
+
+MACHINE_EXTRA_RRECOMMENDS = " kernel-modules"
+
+IMAGE_FSTYPES ?= "jffs2 tar.bz2"
+JFFS2_ERASEBLOCK = "0x4000"
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..1e0d92c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..91ccfb8
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH powerpc
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..89b344f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH powerpc
+
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..2701fd8
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH powerpc
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..47489e4
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..582759e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..97f747f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..5bfe1fe
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1,164 @@
+# yocto-bsp-filename {{=machine}}.cfg
+#.........................................................................
+#. WARNING
+#.
+#. This file is a kernel configuration fragment, and not a full kernel
+#. configuration file. The final kernel configuration is made up of
+#. an assembly of processed fragments, each of which is designed to
+#. capture a specific part of the final configuration (e.g. platform
+#. configuration, feature configuration, and board specific hardware
+#. configuration). For more information on kernel configuration, please
+#. consult the product documentation.
+#.
+#.........................................................................
+CONFIG_PPC32=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+
+#
+# Processor support
+#
+CONFIG_PPC_83xx=y
+
+#
+# Platform support
+#
+CONFIG_MPC831x_RDB=y
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_PMAC is not set
+
+#
+# Bus options
+#
+CONFIG_PCI=y
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_PHYSMAP_OF=y
+
+#
+# NAND Flash Device Drivers
+#
+CONFIG_MTD_NAND=y
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_GIANFAR=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_8xxx_WDT=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_MPC=y
+
+CONFIG_SENSORS_LM75=y
+
+CONFIG_MISC_DEVICES=y
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_EEPROM_AT24=y
+
+#
+# SPI support
+#
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_MPC8xxx=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_HWMON=y
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LOGGING=y
+
+CONFIG_ATA=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_SATA_FSL=y
+CONFIG_ATA_SFF=y
+
+#
+# USB support
+#
+CONFIG_USB=m
+CONFIG_USB_DEVICEFS=y
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_STORAGE=m
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_CLASS=y
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=y
+
+CONFIG_KGDB_8250=m
+
+CONFIG_CRYPTO_DEV_TALITOS=m
+
+CONFIG_FSL_DMA=y
+
+CONFIG_MMC=y
+CONFIG_MMC_SPI=m
+
+CONFIG_USB_FSL_MPH_DR_OF=y
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..7aac8b0
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,10 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+
+include cfg/usb-mass-storage.scc
+include cfg/fs/vfat.scc
+
+include cfg/dmaengine.scc
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..c336007
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,25 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..0a47a4e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..815c77b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..1e99a04
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..b88a06c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf
new file mode 100644
index 0000000..67e1cbd
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf
@@ -0,0 +1,74 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+{{ if qemuarch == "i386" or qemuarch == "x86_64": }}
+PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
+PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
+PREFERRED_PROVIDER_virtual/libgles1 ?= "mesa"
+PREFERRED_PROVIDER_virtual/libgles2 ?= "mesa"
+
+{{ input type:"choicelist" name:"qemuarch" prio:"5" msg:"Which qemu architecture would you like to use?" default:"i386" }}
+{{ input type:"choice" val:"i386" msg:"i386 (32-bit)" }}
+{{ input type:"choice" val:"x86_64" msg:"x86_64 (64-bit)" }}
+{{ input type:"choice" val:"arm" msg:"ARM (32-bit)" }}
+{{ input type:"choice" val:"powerpc" msg:"PowerPC (32-bit)" }}
+{{ input type:"choice" val:"mips" msg:"MIPS (32-bit)" }}
+{{ input type:"choice" val:"mips64" msg:"MIPS64 (64-bit)" }}
+{{ if qemuarch == "i386": }}
+require conf/machine/include/qemu.inc
+require conf/machine/include/tune-i586.inc
+{{ if qemuarch == "x86_64": }}
+require conf/machine/include/qemu.inc
+DEFAULTTUNE ?= "core2-64"
+require conf/machine/include/tune-core2.inc
+{{ if qemuarch == "arm": }}
+require conf/machine/include/qemu.inc
+require conf/machine/include/tune-arm926ejs.inc
+{{ if qemuarch == "powerpc": }}
+require conf/machine/include/qemu.inc
+require conf/machine/include/tune-ppc7400.inc
+{{ if qemuarch == "mips": }}
+require conf/machine/include/qemu.inc
+require conf/machine/include/tune-mips32.inc
+{{ if qemuarch == "mips64": }}
+require conf/machine/include/qemu.inc
+require conf/machine/include/tune-mips64.inc
+
+{{ if qemuarch == "i386" or qemuarch == "x86_64": }}
+MACHINE_FEATURES += "x86"
+KERNEL_IMAGETYPE = "bzImage"
+SERIAL_CONSOLE = "115200 ttyS0"
+XSERVER = "xserver-xorg \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast', '', d)} \
+ xf86-input-vmmouse \
+ xf86-input-keyboard \
+ xf86-input-evdev \
+ xf86-video-vmware"
+
+{{ if qemuarch == "arm": }}
+KERNEL_IMAGETYPE = "zImage"
+SERIAL_CONSOLE = "115200 ttyAMA0"
+
+{{ if qemuarch == "powerpc": }}
+KERNEL_IMAGETYPE = "vmlinux"
+SERIAL_CONSOLE = "115200 ttyS0"
+
+{{ if qemuarch == "mips" or qemuarch == "mips64": }}
+KERNEL_IMAGETYPE = "vmlinux"
+KERNEL_ALT_IMAGETYPE = "vmlinux.bin"
+SERIAL_CONSOLE = "115200 ttyS0"
+MACHINE_EXTRA_RRECOMMENDS = " kernel-modules"
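
Each `{{ if qemuarch == ... }}` guard above keeps only the `require`/tune lines for the architecture the user selects, so the generated machine.conf ends up with exactly one tune include. A minimal sketch of that selection, with a hypothetical table whose values are copied from the guarded lines above (illustrative only; the actual expansion is performed by the bsp template engine added elsewhere in this patch):

    # Hypothetical helper mirroring the tune selection in the template above;
    # the table values come from the guarded lines, the helper itself is not
    # part of the tooling.
    TUNE_LINES = {
        "i386":    ["require conf/machine/include/tune-i586.inc"],
        "x86_64":  ['DEFAULTTUNE ?= "core2-64"',
                    "require conf/machine/include/tune-core2.inc"],
        "arm":     ["require conf/machine/include/tune-arm926ejs.inc"],
        "powerpc": ["require conf/machine/include/tune-ppc7400.inc"],
        "mips":    ["require conf/machine/include/tune-mips32.inc"],
        "mips64":  ["require conf/machine/include/tune-mips64.inc"],
    }

    def expand_tune_block(qemuarch):
        """Return the machine.conf lines kept for a given qemuarch choice."""
        return "\n".join(["require conf/machine/include/qemu.inc"] + TUNE_LINES[qemuarch])

    print(expand_tune_block("arm"))
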
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine.noinstall b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine.noinstall
new file mode 100644
index 0000000..b442d02
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=machine}}
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine/interfaces b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine/interfaces
new file mode 100644
index 0000000..1696776
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown/machine/interfaces
@@ -0,0 +1,5 @@
+# /etc/network/interfaces -- configuration file for ifup(8), ifdown(8)
+
+# The loopback interface
+auto lo
+iface lo inet loopback
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend
new file mode 100644
index 0000000..72d991c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-core/init-ifupdown/init-ifupdown_1.0.bbappend
@@ -0,0 +1 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
new file mode 100644
index 0000000..b442d02
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=machine}}
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
new file mode 100644
index 0000000..3bdde79
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
@@ -0,0 +1,77 @@
+
+Section "Files"
+EndSection
+
+Section "InputDevice"
+ Identifier "Generic Keyboard"
+ Driver "evdev"
+ Option "CoreKeyboard"
+ Option "Device" "/dev/input/by-path/platform-i8042-serio-0-event-kbd"
+ Option "XkbRules" "xorg"
+ Option "XkbModel" "evdev"
+ Option "XkbLayout" "us"
+EndSection
+
+Section "InputDevice"
+ Identifier "Configured Mouse"
+{{ if qemuarch == "arm" or qemuarch == "powerpc" or qemuarch == "mips" or qemuarch == "mips64": }}
+ Driver "mouse"
+{{ if qemuarch == "i386" or qemuarch == "x86_64": }}
+ Driver "vmmouse"
+
+ Option "CorePointer"
+ Option "Device" "/dev/input/mice"
+ Option "Protocol" "ImPS/2"
+ Option "ZAxisMapping" "4 5"
+ Option "Emulate3Buttons" "true"
+EndSection
+
+Section "InputDevice"
+ Identifier "Qemu Tablet"
+ Driver "evdev"
+ Option "CorePointer"
+ Option "Device" "/dev/input/touchscreen0"
+ Option "USB" "on"
+EndSection
+
+Section "Device"
+ Identifier "Graphics Controller"
+{{ if qemuarch == "arm" or qemuarch == "powerpc" or qemuarch == "mips" or qemuarch == "mips64": }}
+ Driver "fbdev"
+{{ if qemuarch == "i386" or qemuarch == "x86_64": }}
+ Driver "vmware"
+
+EndSection
+
+Section "Monitor"
+ Identifier "Generic Monitor"
+ Option "DPMS"
+ # 1024x600 59.85 Hz (CVT) hsync: 37.35 kHz; pclk: 49.00 MHz
+ Modeline "1024x600_60.00" 49.00 1024 1072 1168 1312 600 603 613 624 -hsync +vsync
+ # 640x480 @ 60Hz (Industry standard) hsync: 31.5kHz
+ ModeLine "640x480" 25.2 640 656 752 800 480 490 492 525 -hsync -vsync
+ # 640x480 @ 72Hz (VESA) hsync: 37.9kHz
+ ModeLine "640x480" 31.5 640 664 704 832 480 489 491 520 -hsync -vsync
+ # 640x480 @ 75Hz (VESA) hsync: 37.5kHz
+ ModeLine "640x480" 31.5 640 656 720 840 480 481 484 500 -hsync -vsync
+ # 640x480 @ 85Hz (VESA) hsync: 43.3kHz
+ ModeLine "640x480" 36.0 640 696 752 832 480 481 484 509 -hsync -vsync
+EndSection
+
+Section "Screen"
+ Identifier "Default Screen"
+ Device "Graphics Controller"
+ Monitor "Generic Monitor"
+ SubSection "Display"
+ Modes "640x480"
+ EndSubSection
+EndSection
+
+Section "ServerLayout"
+ Identifier "Default Layout"
+ Screen "Default Screen"
+ InputDevice "Generic Keyboard"
+ # InputDevice "Configured Mouse"
+ InputDevice "QEMU Tablet"
+ Option "AllowEmptyInput" "no"
+EndSection
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
new file mode 100644
index 0000000..72d991c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
@@ -0,0 +1 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..0fb5283
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..a81b858
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH {{=qemuarch}}
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..14554da
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,20 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH {{=qemuarch}}
+
+{{ if qemuarch == "i386" or qemuarch == "x86_64": }}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if qemuarch == "arm": }}
+include bsp/arm-versatile-926ejs/arm-versatile-926ejs-standard
+{{ if qemuarch == "powerpc": }}
+include bsp/qemu-ppc32/qemu-ppc32-standard
+{{ if qemuarch == "mips": }}
+include bsp/mti-malta32/mti-malta32-be-standard
+{{ if qemuarch == "mips64": }}
+include bsp/mti-malta64/mti-malta64-be-standard
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
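
For the i386 and x86_64 cases the include line above is produced by `map_standard_kbranch()`, a helper this patch adds in scripts/lib/bsp/kernel.py; the other QEMU architectures reuse the existing reference-board descriptions directly. The heart of that helper is choosing between the two branch prompts; a rough sketch of the idea (the body below is an assumption for illustration, not the real implementation, which also maps the branch onto an includable kernel-cache description):

    # Assumed sketch of the branch-selection step behind map_standard_kbranch();
    # see scripts/lib/bsp/kernel.py in this patch for the real logic.
    def map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch):
        """Pick the branch a generated -standard.scc should be based on."""
        return new_kbranch if need_new_kbranch == "y" else existing_kbranch

    print(map_standard_kbranch("n", "standard/base", "standard/qemuppc"))
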
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..41d4c6f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH {{=qemuarch}}
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..69efdcc
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..582759e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..4c59daa
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..d560784
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}.cfg
\ No newline at end of file
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..8301e05
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,5 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..7e3ce5b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,55 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta64" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
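
The pairs of prompts above differ only in the branch offered as the default for each QEMU architecture. Collected into one place for reference (branch names are taken from the prompts; the dictionary and helper themselves are purely illustrative):

    # Default existing branches offered by the prompts above, per qemuarch;
    # the dict/helper are illustrative, the branch names come from the template.
    DEFAULT_EXISTING_KBRANCH = {
        "arm":     "standard/arm-versatile-926ejs",
        "powerpc": "standard/qemuppc",
        "i386":    "standard/base",
        "x86_64":  "standard/base",
        "mips":    "standard/mti-malta32",
        "mips64":  "standard/mti-malta64",
    }

    def default_existing_kbranch(qemuarch):
        """Return the branch pre-selected when an existing branch is reused."""
        return DEFAULT_EXISTING_KBRANCH[qemuarch]

    assert default_existing_kbranch("mips64") == "standard/mti-malta64"
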
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..14ee16f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,62 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/common-pc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..e256e08
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,62 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/common-pc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..fce67b4
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,61 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta64" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..4097932
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,61 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta64" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore b/scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/.gitignore
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/machine.conf b/scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/machine.conf
new file mode 100644
index 0000000..e4b8251
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/conf/machine/machine.conf
@@ -0,0 +1,65 @@
+# yocto-bsp-filename {{=machine}}.conf
+#@TYPE: Machine
+#@NAME: {{=machine}}
+
+#@DESCRIPTION: Machine configuration for {{=machine}} systems
+
+{{ if kernel_choice == "custom": preferred_kernel = "linux-yocto-custom" }}
+{{ if kernel_choice == "linux-yocto-dev": preferred_kernel = "linux-yocto-dev" }}
+{{ if kernel_choice == "custom" or kernel_choice == "linux-yocto-dev" : }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel = kernel_choice.split('_')[0] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": preferred_kernel_version = kernel_choice.split('_')[1] }}
+{{ if kernel_choice != "custom" and kernel_choice != "linux-yocto-dev": }}
+PREFERRED_PROVIDER_virtual/kernel ?= "{{=preferred_kernel}}"
+PREFERRED_VERSION_{{=preferred_kernel}} ?= "{{=preferred_kernel_version}}%"
+
+{{ input type:"choicelist" name:"tunefile" prio:"40" msg:"Which machine tuning would you like to use?" default:"tune_core2" }}
+{{ input type:"choice" val:"tune_core2" msg:"Core2 tuning optimizations" }}
+{{ input type:"choice" val:"tune_corei7" msg:"Corei7 tuning optimizations" }}
+{{ if tunefile == "tune_core2": }}
+DEFAULTTUNE ?= "core2-64"
+require conf/machine/include/tune-core2.inc
+{{ if tunefile == "tune_corei7": }}
+DEFAULTTUNE ?= "corei7-64"
+require conf/machine/include/tune-corei7.inc
+
+require conf/machine/include/x86-base.inc
+
+MACHINE_FEATURES += "wifi efi pcbios"
+
+{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
+
+{{ if xserver == "y": }}
+{{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_i915" }}
+
+{{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }}
+{{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }}
+{{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }}
+{{ input type:"choice" val:"xserver_fbdev" msg:"fbdev xserver support" }}
+{{ input type:"choice" val:"xserver_modesetting" msg:"modesetting xserver support" }}
+{{ if xserver == "y": }}
+XSERVER ?= "${XSERVER_X86_BASE} \
+ ${XSERVER_X86_EXT} \
+{{ if xserver == "y" and xserver_choice == "xserver_vesa": }}
+ ${XSERVER_X86_VESA} \
+{{ if xserver == "y" and xserver_choice == "xserver_i915": }}
+ ${XSERVER_X86_I915} \
+{{ if xserver == "y" and xserver_choice == "xserver_i965": }}
+ ${XSERVER_X86_I965} \
+{{ if xserver == "y" and xserver_choice == "xserver_fbdev": }}
+ ${XSERVER_X86_FBDEV} \
+{{ if xserver == "y" and xserver_choice == "xserver_modesetting": }}
+ ${XSERVER_X86_MODESETTING} \
+{{ if xserver == "y": }}
+ "
+
+MACHINE_EXTRA_RRECOMMENDS += "linux-firmware v86d eee-acpi-scripts"
+
+EXTRA_OECONF_append_pn-matchbox-panel-2 = " --with-battery=acpi"
+
+GLIBC_ADDONS = "nptl"
+
+{{ if xserver == "y" and xserver_choice == "xserver_vesa": }}
+APPEND += "video=vesafb vga=0x318"
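
Only one of the `${XSERVER_X86_*}` fragments above survives expansion, so the generated machine.conf carries a single XSERVER assignment built from the base and ext fragments plus the chosen driver. A sketch of that assembly (hypothetical helper; the fragment names are taken from the lines above):

    # Illustrative assembly of the XSERVER value produced by the guarded
    # lines above for a given xserver_choice; not part of the tooling.
    XSERVER_FRAGMENT = {
        "xserver_vesa":        "${XSERVER_X86_VESA}",
        "xserver_i915":        "${XSERVER_X86_I915}",
        "xserver_i965":        "${XSERVER_X86_I965}",
        "xserver_fbdev":       "${XSERVER_X86_FBDEV}",
        "xserver_modesetting": "${XSERVER_X86_MODESETTING}",
    }

    def expand_xserver(xserver, xserver_choice):
        """Return the XSERVER assignment the template would emit ('' if X is off)."""
        if xserver != "y":
            return ""
        parts = ["${XSERVER_X86_BASE}", "${XSERVER_X86_EXT}",
                 XSERVER_FRAGMENT[xserver_choice]]
        return 'XSERVER ?= "%s \\\n           "' % " \\\n           ".join(parts)

    print(expand_xserver("y", "xserver_i915"))
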
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
new file mode 100644
index 0000000..b442d02
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{=machine}}
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
new file mode 100644
index 0000000..ac9a0f1
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config/machine/xorg.conf
@@ -0,0 +1 @@
+# yocto-bsp-filename {{ if xserver == "y": }} this
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
new file mode 100644
index 0000000..3083003
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend
@@ -0,0 +1,2 @@
+# yocto-bsp-filename {{ if xserver == "y": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files.noinstall b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files.noinstall
new file mode 100644
index 0000000..1e0d92c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files.noinstall
@@ -0,0 +1 @@
+# yocto-bsp-dirname {{ if kernel_choice != "custom": }} files
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-preempt-rt.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-preempt-rt.scc
new file mode 100644
index 0000000..bbeeecd
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-preempt-rt.scc
@@ -0,0 +1,17 @@
+# yocto-bsp-filename {{=machine}}-preempt-rt.scc
+define KMACHINE {{=machine}}
+
+define KARCH x86_64
+
+include {{=map_preempt_rt_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
+
+# default policy for preempt-rt kernels
+include cfg/usb-mass-storage.scc
+include cfg/boot-live.scc
+include features/latencytop/latencytop.scc
+include features/profiling/profiling.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc
new file mode 100644
index 0000000..9c9cc90
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc
@@ -0,0 +1,17 @@
+# yocto-bsp-filename {{=machine}}-standard.scc
+define KMACHINE {{=machine}}
+
+define KARCH x86_64
+
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
+
+# default policy for standard kernels
+include cfg/usb-mass-storage.scc
+include cfg/boot-live.scc
+include features/latencytop/latencytop.scc
+include features/profiling/profiling.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-tiny.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-tiny.scc
new file mode 100644
index 0000000..b53706f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-tiny.scc
@@ -0,0 +1,11 @@
+# yocto-bsp-filename {{=machine}}-tiny.scc
+define KMACHINE {{=machine}}
+
+define KARCH x86_64
+
+include {{=map_tiny_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+{{ if need_new_kbranch == "y": }}
+define KTYPE {{=new_kbranch}}
+branch {{=machine}}
+
+include {{=machine}}.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-config.cfg b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-config.cfg
new file mode 100644
index 0000000..47489e4
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-config.cfg
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-config.cfg
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-features.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-features.scc
new file mode 100644
index 0000000..582759e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-features.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-features.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-patches.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-patches.scc
new file mode 100644
index 0000000..97f747f
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-user-patches.scc
@@ -0,0 +1 @@
+# yocto-bsp-filename {{=machine}}-user-patches.scc
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.cfg b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.cfg
new file mode 100644
index 0000000..3290dde
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.cfg
@@ -0,0 +1,48 @@
+# yocto-bsp-filename {{=machine}}.cfg
+CONFIG_PRINTK=y
+
+# Basic hardware support for the box - network, USB, PCI, sound
+CONFIG_NETDEVICES=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_ATA_SFF=y
+CONFIG_PCI=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_R8169=y
+CONFIG_PATA_SCH=y
+CONFIG_MMC_SDHCI_PCI=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_NET=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HDA_INTEL=y
+
+# Make sure these are on, otherwise the bootup won't be fun
+CONFIG_EXT3_FS=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_MODULES=y
+CONFIG_SHMEM=y
+CONFIG_TMPFS=y
+CONFIG_PACKET=y
+
+CONFIG_I2C=y
+CONFIG_AGP=y
+CONFIG_PM=y
+CONFIG_ACPI=y
+CONFIG_INPUT=y
+
+# Needed for booting (and using) USB memory sticks
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+
+CONFIG_RD_GZIP=y
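
The options above form the hardware fragment that the machine.scc below pulls in via `kconf hardware`. When bringing up a board it can be handy to confirm that the final kernel .config really carries them; a small helper for that check (hypothetical, not part of the tooling; the paths in the usage comment are placeholders):

    # Hypothetical sanity check: report CONFIG_ lines from a hardware fragment
    # that did not make it into the final kernel .config.
    def missing_options(fragment_path, dotconfig_path):
        with open(fragment_path) as f:
            want = {l.strip() for l in f if l.startswith("CONFIG_")}
        with open(dotconfig_path) as f:
            have = {l.strip() for l in f if l.startswith("CONFIG_")}
        return sorted(want - have)

    # Example usage (paths are placeholders):
    # print(missing_options("mymachine.cfg", "tmp/work/.../linux-yocto/.config"))
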
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.scc b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.scc
new file mode 100644
index 0000000..9b7c291
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine.scc
@@ -0,0 +1,14 @@
+# yocto-bsp-filename {{=machine}}.scc
+kconf hardware {{=machine}}.cfg
+
+include features/serial/8250.scc
+{{ if xserver == "y" and xserver_choice == "xserver_vesa": }}
+include cfg/vesafb.scc
+{{ if xserver == "y" and (xserver_choice == "xserver_i915" or xserver_choice == "xserver_i965"): }}
+include features/i915/i915.scc
+
+include cfg/usb-mass-storage.scc
+include features/power/intel.scc
+
+kconf hardware {{=machine}}-user-config.cfg
+include {{=machine}}-user-patches.scc
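
Assuming the `{{ if ... }}` expressions are evaluated as Python conditionals, as their syntax suggests, compound guards follow Python precedence, where `and` binds tighter than `or`; that is why the i915/i965 guard above groups its alternatives explicitly. A short demonstration of the difference:

    # Python precedence demo for compound template guards: 'and' binds tighter
    # than 'or', so parentheses are needed when one flag gates two alternatives.
    xserver, xserver_choice = "n", "xserver_i965"

    ungrouped = xserver == "y" and xserver_choice == "xserver_i915" or xserver_choice == "xserver_i965"
    grouped = xserver == "y" and (xserver_choice == "xserver_i915" or xserver_choice == "xserver_i965")

    print(ungrouped)  # True  - the i965 test escapes the xserver gate
    print(grouped)    # False - X support is off, so the i915 feature stays out
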
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
new file mode 100644
index 0000000..00cf360
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
@@ -0,0 +1,5 @@
+{{ if kernel_choice != "custom": }}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+
+{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-dev.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-dev.bbappend
new file mode 100644
index 0000000..c336007
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-dev.bbappend
@@ -0,0 +1,25 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-dev": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
new file mode 100644
index 0000000..0a47a4e
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
new file mode 100644
index 0000000..815c77b
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..1e99a04
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.1": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
diff --git a/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..b88a06c
--- /dev/null
+++ b/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.4": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.4"
diff --git a/scripts/lib/bsp/tags.py b/scripts/lib/bsp/tags.py
new file mode 100644
index 0000000..3719427
--- /dev/null
+++ b/scripts/lib/bsp/tags.py
@@ -0,0 +1,49 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2012, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module provides a place to define common constants for the
+# Yocto BSP Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] intel.com>
+#
+
+OPEN_TAG = "{{"
+CLOSE_TAG = "}}"
+ASSIGN_TAG = "{{="
+INPUT_TAG = "input"
+IF_TAG = "if"
+FILENAME_TAG = "yocto-bsp-filename"
+DIRNAME_TAG = "yocto-bsp-dirname"
+
+INDENT_STR = " "
+
+BLANKLINE_STR = "of.write(\"\\n\")"
+NORMAL_START = "of.write"
+OPEN_START = "current_file ="
+
+INPUT_TYPE_PROPERTY = "type"
+
+SRC_URI_FILE = "file://"
+
+GIT_CHECK_URI = "git://git.yoctoproject.org/linux-yocto-dev.git"
+
+
+
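The constants above encode the template syntax used by the substrate files earlier in this patch (the {{ input ... }}, {{ if ... }} and {{=machine}} markers in the .bbappend templates). The real parsing lives in bsp/engine.py; the following is only an illustrative sketch, under the assumption that scripts/lib is on sys.path, of what the constants denote:

    from bsp.tags import OPEN_TAG, CLOSE_TAG, ASSIGN_TAG, INPUT_TAG, IF_TAG

    def classify_template_line(line):
        # Rough classification only - not the engine's actual algorithm
        stripped = line.strip()
        if stripped.startswith(OPEN_TAG) and not stripped.startswith(ASSIGN_TAG) \
                and stripped.endswith(CLOSE_TAG):
            body = stripped[len(OPEN_TAG):-len(CLOSE_TAG)].strip()
            if body.startswith(INPUT_TAG):
                return 'input'        # e.g. {{ input type:"boolean" ... }}
            if body.startswith(IF_TAG):
                return 'conditional'  # e.g. {{ if need_new_kbranch == "y": }}
        if ASSIGN_TAG in line:
            return 'substitution'     # e.g. KBRANCH_{{=machine}} = "..."
        return 'literal'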
diff --git a/scripts/lib/devtool/__init__.py b/scripts/lib/devtool/__init__.py
new file mode 100644
index 0000000..ff97dfc
--- /dev/null
+++ b/scripts/lib/devtool/__init__.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+
+# Development tool - utility functions for plugins
+#
+# Copyright (C) 2014 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool plugins module"""
+
+import os
+import sys
+import subprocess
+import logging
+import re
+
+logger = logging.getLogger('devtool')
+
+
+class DevtoolError(Exception):
+ """Exception for handling devtool errors"""
+ pass
+
+
+def exec_build_env_command(init_path, builddir, cmd, watch=False, **options):
+ """Run a program in bitbake build context"""
+ import bb
+ if not 'cwd' in options:
+ options["cwd"] = builddir
+ if init_path:
+ # As the OE init script makes use of BASH_SOURCE to determine OEROOT,
+ # and can't determine it when running under dash, we need to set
+ # the executable to bash to correctly set things up
+ if not 'executable' in options:
+ options['executable'] = 'bash'
+ logger.debug('Executing command: "%s" using init path %s' % (cmd, init_path))
+ init_prefix = '. %s %s > /dev/null && ' % (init_path, builddir)
+ else:
+ logger.debug('Executing command "%s"' % cmd)
+ init_prefix = ''
+ if watch:
+ if sys.stdout.isatty():
+ # Fool bitbake into thinking it's outputting to a terminal (because it is, indirectly)
+ cmd = 'script -e -q -c "%s" /dev/null' % cmd
+ return exec_watch('%s%s' % (init_prefix, cmd), **options)
+ else:
+ return bb.process.run('%s%s' % (init_prefix, cmd), **options)
+
+def exec_watch(cmd, **options):
+ """Run program with stdout shown on sys.stdout"""
+ import bb
+ if isinstance(cmd, basestring) and not "shell" in options:
+ options["shell"] = True
+
+ process = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **options
+ )
+
+ buf = ''
+ while True:
+ out = process.stdout.read(1)
+ if out:
+ sys.stdout.write(out)
+ sys.stdout.flush()
+ buf += out
+ elif out == '' and process.poll() != None:
+ break
+
+ if process.returncode != 0:
+ raise bb.process.ExecutionError(cmd, process.returncode, buf, None)
+
+ return buf, None
+
+def exec_fakeroot(d, cmd, **kwargs):
+ """Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
+ # Grab the command and check it actually exists
+ fakerootcmd = d.getVar('FAKEROOTCMD', True)
+ if not os.path.exists(fakerootcmd):
+        logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built' % fakerootcmd)
+ return 2
+ # Set up the appropriate environment
+ newenv = dict(os.environ)
+ fakerootenv = d.getVar('FAKEROOTENV', True)
+ for varvalue in fakerootenv.split():
+ if '=' in varvalue:
+ splitval = varvalue.split('=', 1)
+ newenv[splitval[0]] = splitval[1]
+ return subprocess.call("%s %s" % (fakerootcmd, cmd), env=newenv, **kwargs)
+
+def setup_tinfoil(config_only=False, basepath=None, tracking=False):
+ """Initialize tinfoil api from bitbake"""
+ import scriptpath
+ orig_cwd = os.path.abspath(os.curdir)
+ try:
+ if basepath:
+ os.chdir(basepath)
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+ if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil(tracking=tracking)
+ tinfoil.prepare(config_only)
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ finally:
+ os.chdir(orig_cwd)
+ return tinfoil
+
+def get_recipe_file(cooker, pn):
+ """Find recipe file corresponding a package name"""
+ import oe.recipeutils
+ recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
+ if not recipefile:
+ skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
+ if skipreasons:
+ logger.error('\n'.join(skipreasons))
+ else:
+ logger.error("Unable to find any recipe file matching %s" % pn)
+ return recipefile
+
+def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
+ """Parse recipe of a package"""
+ import oe.recipeutils
+ recipefile = get_recipe_file(tinfoil.cooker, pn)
+ if not recipefile:
+ # Error already logged
+ return None
+ if appends:
+ append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
+ if filter_workspace:
+ # Filter out appends from the workspace
+ append_files = [path for path in append_files if
+ not path.startswith(config.workspace_path)]
+ else:
+ append_files = None
+ return oe.recipeutils.parse_recipe(recipefile, append_files,
+ tinfoil.config_data)
+
+def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
+ """
+ Check that a recipe is in the workspace and (optionally) that source
+ is present.
+ """
+
+ workspacepn = pn
+
+ for recipe, value in workspace.iteritems():
+ if recipe == pn:
+ break
+ if bbclassextend:
+ recipefile = value['recipefile']
+ if recipefile:
+ targets = get_bbclassextend_targets(recipefile, recipe)
+ if pn in targets:
+ workspacepn = recipe
+ break
+ else:
+ raise DevtoolError("No recipe named '%s' in your workspace" % pn)
+
+ if checksrc:
+ srctree = workspace[workspacepn]['srctree']
+ if not os.path.exists(srctree):
+ raise DevtoolError("Source tree %s for recipe %s does not exist" % (srctree, workspacepn))
+ if not os.listdir(srctree):
+ raise DevtoolError("Source tree %s for recipe %s is empty" % (srctree, workspacepn))
+
+ return workspacepn
+
+def use_external_build(same_dir, no_same_dir, d):
+ """
+ Determine if we should use B!=S (separate build and source directories) or not
+ """
+    import bb.data
+    b_is_s = True
+ if no_same_dir:
+ logger.info('Using separate build directory since --no-same-dir specified')
+ b_is_s = False
+ elif same_dir:
+ logger.info('Using source tree as build directory since --same-dir specified')
+ elif bb.data.inherits_class('autotools-brokensep', d):
+ logger.info('Using source tree as build directory since recipe inherits autotools-brokensep')
+ elif d.getVar('B', True) == os.path.abspath(d.getVar('S', True)):
+ logger.info('Using source tree as build directory since that would be the default for this recipe')
+ else:
+ b_is_s = False
+ return b_is_s
+
+def setup_git_repo(repodir, version, devbranch, basetag='devtool-base'):
+ """
+ Set up the git repository for the source tree
+ """
+ import bb.process
+ if not os.path.exists(os.path.join(repodir, '.git')):
+ bb.process.run('git init', cwd=repodir)
+ bb.process.run('git add .', cwd=repodir)
+ commit_cmd = ['git', 'commit', '-q']
+ stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
+ if not stdout:
+ commit_cmd.append('--allow-empty')
+ commitmsg = "Initial empty commit with no upstream sources"
+ elif version:
+ commitmsg = "Initial commit from upstream at version %s" % version
+ else:
+ commitmsg = "Initial commit from upstream"
+ commit_cmd += ['-m', commitmsg]
+ bb.process.run(commit_cmd, cwd=repodir)
+
+ bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
+ bb.process.run('git tag -f %s' % basetag, cwd=repodir)
+
+def recipe_to_append(recipefile, config, wildcard=False):
+ """
+ Convert a recipe file to a bbappend file path within the workspace.
+ NOTE: if the bbappend already exists, you should be using
+ workspace[args.recipename]['bbappend'] instead of calling this
+ function.
+ """
+ appendname = os.path.splitext(os.path.basename(recipefile))[0]
+ if wildcard:
+ appendname = re.sub(r'_.*', '_%', appendname)
+ appendpath = os.path.join(config.workspace_path, 'appends')
+ appendfile = os.path.join(appendpath, appendname + '.bbappend')
+ return appendfile
+
+def get_bbclassextend_targets(recipefile, pn):
+ """
+ Cheap function to get BBCLASSEXTEND and then convert that to the
+ list of targets that would result.
+ """
+ import bb.utils
+
+ values = {}
+ def get_bbclassextend_varfunc(varname, origvalue, op, newlines):
+ values[varname] = origvalue
+ return origvalue, None, 0, True
+ with open(recipefile, 'r') as f:
+ bb.utils.edit_metadata(f, ['BBCLASSEXTEND'], get_bbclassextend_varfunc)
+
+ targets = []
+ bbclassextend = values.get('BBCLASSEXTEND', '').split()
+ if bbclassextend:
+ for variant in bbclassextend:
+ if variant == 'nativesdk':
+ targets.append('%s-%s' % (variant, pn))
+ elif variant in ['native', 'cross', 'crosssdk']:
+ targets.append('%s-%s' % (pn, variant))
+ return targets
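To make the naming convention implemented by get_bbclassextend_targets() concrete, here is a hedged usage sketch (the recipe path and BBCLASSEXTEND value are hypothetical):

    # Suppose /path/to/foo_1.0.bb contains:  BBCLASSEXTEND = "native nativesdk"
    targets = get_bbclassextend_targets('/path/to/foo_1.0.bb', 'foo')
    # -> ['foo-native', 'nativesdk-foo']
    # 'nativesdk' is prefixed to the recipe name, while 'native', 'cross' and
    # 'crosssdk' are suffixed, matching how such variant targets are usually named.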
diff --git a/scripts/lib/devtool/build.py b/scripts/lib/devtool/build.py
new file mode 100644
index 0000000..48f6fe1
--- /dev/null
+++ b/scripts/lib/devtool/build.py
@@ -0,0 +1,86 @@
+# Development tool - build command plugin
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool build plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import tempfile
+from devtool import exec_build_env_command, check_workspace_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+
+def _set_file_values(fn, values):
+ remaining = values.keys()
+
+ def varfunc(varname, origvalue, op, newlines):
+ newvalue = values.get(varname, origvalue)
+ remaining.remove(varname)
+ return (newvalue, '=', 0, True)
+
+ with open(fn, 'r') as f:
+ (updated, newlines) = bb.utils.edit_metadata(f, values, varfunc)
+
+ for item in remaining:
+ updated = True
+ newlines.append('%s = "%s"' % (item, values[item]))
+
+ if updated:
+ with open(fn, 'w') as f:
+ f.writelines(newlines)
+ return updated
+
+def _get_build_tasks(config):
+ tasks = config.get('Build', 'build_task', 'populate_sysroot,packagedata').split(',')
+ return ['do_%s' % task.strip() for task in tasks]
+
+def build(args, config, basepath, workspace):
+ """Entry point for the devtool 'build' subcommand"""
+ workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
+
+ build_tasks = _get_build_tasks(config)
+
+ bbappend = workspace[workspacepn]['bbappend']
+ if args.disable_parallel_make:
+ logger.info("Disabling 'make' parallelism")
+ _set_file_values(bbappend, {'PARALLEL_MAKE': ''})
+ try:
+ bbargs = []
+ for task in build_tasks:
+ if args.recipename.endswith('-native') and 'package' in task:
+ continue
+ bbargs.append('%s:%s' % (args.recipename, task))
+ exec_build_env_command(config.init_path, basepath, 'bitbake %s' % ' '.join(bbargs), watch=True)
+ except bb.process.ExecutionError as e:
+ # We've already seen the output since watch=True, so just ensure we return something to the user
+ return e.exitcode
+ finally:
+ if args.disable_parallel_make:
+ _set_file_values(bbappend, {'PARALLEL_MAKE': None})
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ parser_build = subparsers.add_parser('build', help='Build a recipe',
+ description='Builds the specified recipe using bitbake (up to and including %s)' % ', '.join(_get_build_tasks(context.config)),
+ group='working')
+ parser_build.add_argument('recipename', help='Recipe to build')
+ parser_build.add_argument('-s', '--disable-parallel-make', action="store_true", help='Disable make parallelism')
+ parser_build.set_defaults(func=build)
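As a quick illustration of how the configured build tasks turn into a bitbake command line (a sketch, not part of the patch; FakeConfig stands in for the real config object):

    class FakeConfig:
        def get(self, section, option, default=''):
            return default   # i.e. fall back to 'populate_sysroot,packagedata'

    tasks = _get_build_tasks(FakeConfig())
    # tasks == ['do_populate_sysroot', 'do_packagedata']
    bbargs = ['%s:%s' % ('foo', task) for task in tasks]
    # 'devtool build foo' then effectively runs:
    #   bitbake foo:do_populate_sysroot foo:do_packagedata
    # (package-related tasks are skipped for *-native recipes, as the loop above shows)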
diff --git a/scripts/lib/devtool/build_image.py b/scripts/lib/devtool/build_image.py
new file mode 100644
index 0000000..14c646a
--- /dev/null
+++ b/scripts/lib/devtool/build_image.py
@@ -0,0 +1,168 @@
+# Development tool - build-image plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool plugin containing the build-image subcommand."""
+
+import os
+import errno
+import logging
+import bb
+
+from bb.process import ExecutionError
+from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+class TargetNotImageError(Exception):
+ pass
+
+def _get_packages(tinfoil, workspace, config):
+ """Get list of packages from recipes in the workspace."""
+ result = []
+ for recipe in workspace:
+ data = parse_recipe(config, tinfoil, recipe, True)
+ if 'class-target' in data.getVar('OVERRIDES', True).split(':'):
+ if recipe in data.getVar('PACKAGES', True).split():
+ result.append(recipe)
+ else:
+ logger.warning("Skipping recipe %s as it doesn't produce a "
+ "package with the same name", recipe)
+ return result
+
+def build_image(args, config, basepath, workspace):
+ """Entry point for the devtool 'build-image' subcommand."""
+
+ image = args.imagename
+ auto_image = False
+ if not image:
+ sdk_targets = config.get('SDK', 'sdk_targets', '').split()
+ if sdk_targets:
+ image = sdk_targets[0]
+ auto_image = True
+ if not image:
+ raise DevtoolError('Unable to determine image to build, please specify one')
+
+ try:
+ if args.add_packages:
+ add_packages = args.add_packages.split(',')
+ else:
+ add_packages = None
+ result, outputdir = build_image_task(config, basepath, workspace, image, add_packages)
+ except TargetNotImageError:
+ if auto_image:
+ raise DevtoolError('Unable to determine image to build, please specify one')
+ else:
+ raise DevtoolError('Specified recipe %s is not an image recipe' % image)
+
+ if result == 0:
+ logger.info('Successfully built %s. You can find output files in %s'
+ % (image, outputdir))
+ return result
+
+def build_image_task(config, basepath, workspace, image, add_packages=None, task=None, extra_append=None):
+ # remove <image>.bbappend to make sure setup_tinfoil doesn't
+ # break because of it
+ target_basename = config.get('SDK', 'target_basename', '')
+ if target_basename:
+ appendfile = os.path.join(config.workspace_path, 'appends',
+ '%s.bbappend' % target_basename)
+ try:
+ os.unlink(appendfile)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ tinfoil = setup_tinfoil(basepath=basepath)
+ rd = parse_recipe(config, tinfoil, image, True)
+ if not rd:
+ # Error already shown
+ return (1, None)
+ if not bb.data.inherits_class('image', rd):
+ raise TargetNotImageError()
+
+ # Get the actual filename used and strip the .bb and full path
+ target_basename = rd.getVar('FILE', True)
+ target_basename = os.path.splitext(os.path.basename(target_basename))[0]
+ config.set('SDK', 'target_basename', target_basename)
+ config.write()
+
+ appendfile = os.path.join(config.workspace_path, 'appends',
+ '%s.bbappend' % target_basename)
+
+ outputdir = None
+ try:
+ if workspace or add_packages:
+ if add_packages:
+ packages = add_packages
+ else:
+ packages = _get_packages(tinfoil, workspace, config)
+ else:
+ packages = None
+ if not task:
+ if not packages and not add_packages and workspace:
+ logger.warning('No recipes in workspace, building image %s unmodified', image)
+ elif not packages:
+ logger.warning('No packages to add, building image %s unmodified', image)
+
+ if packages or extra_append:
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as afile:
+ if packages:
+ # include packages from workspace recipes into the image
+ afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
+ if not task:
+ logger.info('Building image %s with the following '
+ 'additional packages: %s', image, ' '.join(packages))
+ if extra_append:
+ for line in extra_append:
+ afile.write('%s\n' % line)
+
+ if task in ['populate_sdk', 'populate_sdk_ext']:
+ outputdir = rd.getVar('SDK_DEPLOY', True)
+ else:
+ outputdir = rd.getVar('DEPLOY_DIR_IMAGE', True)
+
+ tinfoil.shutdown()
+
+ options = ''
+ if task:
+ options += '-c %s' % task
+
+ # run bitbake to build image (or specified task)
+ try:
+ exec_build_env_command(config.init_path, basepath,
+ 'bitbake %s %s' % (options, image), watch=True)
+ except ExecutionError as err:
+ return (err.exitcode, None)
+ finally:
+ if os.path.isfile(appendfile):
+ os.unlink(appendfile)
+ return (0, outputdir)
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the build-image plugin"""
+ parser = subparsers.add_parser('build-image',
+ help='Build image including workspace recipe packages',
+ description='Builds an image, extending it to include '
+ 'packages from recipes in the workspace',
+ group='testbuild', order=-10)
+ parser.add_argument('imagename', help='Image recipe to build', nargs='?')
+ parser.add_argument('-p', '--add-packages', help='Instead of adding packages for the '
+ 'entire workspace, specify packages to be added to the image '
+ '(separate multiple packages by commas)',
+ metavar='PACKAGES')
+ parser.set_defaults(func=build_image)
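To make the temporary bbappend mechanism above concrete, here is a minimal sketch (not part of the patch) of the content build_image_task() writes for a hypothetical set of workspace packages:

    packages = ['foo', 'bar']                        # hypothetical workspace packages
    extra_append = ['SDK_DERIVATIVE = "1"']          # as passed in by the build-sdk plugin below
    lines = ['IMAGE_INSTALL_append = " %s"' % ' '.join(packages)] + extra_append
    print('\n'.join(lines))
    # IMAGE_INSTALL_append = " foo bar"
    # SDK_DERIVATIVE = "1"
    # The <image>.bbappend holding these lines is removed again in the finally block.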
diff --git a/scripts/lib/devtool/build_sdk.py b/scripts/lib/devtool/build_sdk.py
new file mode 100644
index 0000000..b89d65b
--- /dev/null
+++ b/scripts/lib/devtool/build_sdk.py
@@ -0,0 +1,65 @@
+# Development tool - build-sdk command plugin
+#
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import subprocess
+import logging
+import glob
+import shutil
+import errno
+import sys
+import tempfile
+from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+from devtool import build_image
+
+logger = logging.getLogger('devtool')
+
+
+def build_sdk(args, config, basepath, workspace):
+ """Entry point for the devtool build-sdk command"""
+
+ sdk_targets = config.get('SDK', 'sdk_targets', '').split()
+ if sdk_targets:
+ image = sdk_targets[0]
+ else:
+ raise DevtoolError('Unable to determine image to build SDK for')
+
+ extra_append = ['SDK_DERIVATIVE = "1"']
+ try:
+ result, outputdir = build_image.build_image_task(config,
+ basepath,
+ workspace,
+ image,
+ task='populate_sdk_ext',
+ extra_append=extra_append)
+ except build_image.TargetNotImageError:
+ raise DevtoolError('Unable to determine image to build SDK for')
+
+ if result == 0:
+ logger.info('Successfully built SDK. You can find output files in %s'
+ % outputdir)
+ return result
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands"""
+ if context.fixed_setup:
+ parser_build_sdk = subparsers.add_parser('build-sdk',
+ help='Build a derivative SDK of this one',
+ description='Builds an extensible SDK based upon this one and the items in your workspace',
+ group='advanced')
+ parser_build_sdk.set_defaults(func=build_sdk)
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
new file mode 100644
index 0000000..66644cc
--- /dev/null
+++ b/scripts/lib/devtool/deploy.py
@@ -0,0 +1,304 @@
+# Development tool - deploy/undeploy command plugin
+#
+# Copyright (C) 2014-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool plugin containing the deploy subcommands"""
+
+import os
+import subprocess
+import logging
+import tempfile
+import shutil
+import argparse_oe
+from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+deploylist_path = '/.devtool'
+
+def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=False, nopreserve=False, nocheckspace=False):
+ """
+ Prepare a shell script for running on the target to
+ deploy/undeploy files. We have to be careful what we put in this
+ script - only commands that are likely to be available on the
+ target are suitable (the target might be constrained, e.g. using
+ busybox rather than bash with coreutils).
+ """
+ lines = []
+ lines.append('#!/bin/sh')
+ lines.append('set -e')
+ if undeployall:
+ # Yes, I know this is crude - but it does work
+ lines.append('for entry in %s/*.list; do' % deploylist_path)
+ lines.append('[ ! -f $entry ] && exit')
+ lines.append('set `basename $entry | sed "s/.list//"`')
+ if dryrun:
+ if not deploy:
+ lines.append('echo "Previously deployed files for $1:"')
+ lines.append('manifest="%s/$1.list"' % deploylist_path)
+ lines.append('preservedir="%s/$1.preserve"' % deploylist_path)
+ lines.append('if [ -f $manifest ] ; then')
+ # Read manifest in reverse and delete files / remove empty dirs
+ lines.append(' sed \'1!G;h;$!d\' $manifest | while read file')
+ lines.append(' do')
+ if dryrun:
+ lines.append(' if [ ! -d $file ] ; then')
+ lines.append(' echo $file')
+ lines.append(' fi')
+ else:
+ lines.append(' if [ -d $file ] ; then')
+ # Avoid deleting a preserved directory in case it has special perms
+ lines.append(' if [ ! -d $preservedir/$file ] ; then')
+ lines.append(' rmdir $file > /dev/null 2>&1 || true')
+ lines.append(' fi')
+ lines.append(' else')
+ lines.append(' rm $file')
+ lines.append(' fi')
+ lines.append(' done')
+ if not dryrun:
+ lines.append(' rm $manifest')
+ if not deploy and not dryrun:
+ # May as well remove all traces
+ lines.append(' rmdir `dirname $manifest` > /dev/null 2>&1 || true')
+ lines.append('fi')
+
+ if deploy:
+ if not nocheckspace:
+ # Check for available space
+ # FIXME This doesn't take into account files spread across multiple
+ # partitions, but doing that is non-trivial
+ # Find the part of the destination path that exists
+ lines.append('checkpath="$2"')
+ lines.append('while [ "$checkpath" != "/" ] && [ ! -e $checkpath ]')
+ lines.append('do')
+ lines.append(' checkpath=`dirname "$checkpath"`')
+ lines.append('done')
+ lines.append('freespace=`df -P $checkpath | sed "1d" | awk \'{ print $4 }\'`')
+ # First line of the file is the total space
+ lines.append('total=`head -n1 $3`')
+ lines.append('if [ $total -gt $freespace ] ; then')
+ lines.append(' echo "ERROR: insufficient space on target (available ${freespace}, needed ${total})"')
+ lines.append(' exit 1')
+ lines.append('fi')
+ if not nopreserve:
+ # Preserve any files that exist. Note that this will add to the
+ # preserved list with successive deployments if the list of files
+ # deployed changes, but because we've deleted any previously
+ # deployed files at this point it will never preserve anything
+ # that was deployed, only files that existed prior to any deploying
+ # (which makes the most sense)
+ lines.append('cat $3 | sed "1d" | while read file fsize')
+ lines.append('do')
+ lines.append(' if [ -e $file ] ; then')
+ lines.append(' dest="$preservedir/$file"')
+ lines.append(' mkdir -p `dirname $dest`')
+ lines.append(' mv $file $dest')
+ lines.append(' fi')
+ lines.append('done')
+ lines.append('rm $3')
+ lines.append('mkdir -p `dirname $manifest`')
+ lines.append('mkdir -p $2')
+ if verbose:
+ lines.append(' tar xv -C $2 -f - | tee $manifest')
+ else:
+ lines.append(' tar xv -C $2 -f - > $manifest')
+ lines.append('sed -i "s!^./!$2!" $manifest')
+ elif not dryrun:
+ # Put any preserved files back
+ lines.append('if [ -d $preservedir ] ; then')
+ lines.append(' cd $preservedir')
+ lines.append(' find . -type f -exec mv {} /{} \;')
+ lines.append(' cd /')
+ lines.append(' rm -rf $preservedir')
+ lines.append('fi')
+
+ if undeployall:
+ if not dryrun:
+ lines.append('echo "NOTE: Successfully undeployed $1"')
+ lines.append('done')
+
+ # Delete the script itself
+ lines.append('rm $0')
+ lines.append('')
+
+ return '\n'.join(lines)
+
+
+def deploy(args, config, basepath, workspace):
+ """Entry point for the devtool 'deploy' subcommand"""
+ import re
+ import math
+ import oe.recipeutils
+
+ check_workspace_recipe(workspace, args.recipename, checksrc=False)
+
+ try:
+ host, destdir = args.target.split(':')
+ except ValueError:
+ destdir = '/'
+ else:
+ args.target = host
+ if not destdir.endswith('/'):
+ destdir += '/'
+
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ rd = oe.recipeutils.parse_recipe_simple(tinfoil.cooker, args.recipename, tinfoil.config_data)
+ except Exception as e:
+ raise DevtoolError('Exception parsing recipe %s: %s' %
+ (args.recipename, e))
+ recipe_outdir = rd.getVar('D', True)
+ if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
+ raise DevtoolError('No files to deploy - have you built the %s '
+ 'recipe? If so, the install step has not installed '
+ 'any files.' % args.recipename)
+
+ filelist = []
+ ftotalsize = 0
+ for root, _, files in os.walk(recipe_outdir):
+ for fn in files:
+ # Get the size in kiB (since we'll be comparing it to the output of du -k)
+ # MUST use lstat() here not stat() or getfilesize() since we don't want to
+ # dereference symlinks
+ fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024))
+ ftotalsize += fsize
+ # The path as it would appear on the target
+ fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
+ filelist.append((fpath, fsize))
+
+ if args.dry_run:
+ print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
+ for item, _ in filelist:
+ print(' %s' % item)
+ return 0
+
+
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
+
+ # In order to delete previously deployed files and have the manifest file on
+ # the target, we write out a shell script and then copy it to the target
+ # so we can then run it (piping tar output to it).
+ # (We cannot use scp here, because it doesn't preserve symlinks.)
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_deploy.sh'
+ tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
+ shellscript = _prepare_remote_script(deploy=True,
+ verbose=args.show_status,
+ nopreserve=args.no_preserve,
+ nocheckspace=args.no_check_space)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Write out the file list
+ with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
+ f.write('%d\n' % ftotalsize)
+ for fpath, fsize in filelist:
+ f.write('%s %d\n' % (fpath, fsize))
+ # Copy them to the target
+ ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s \'sh %s %s %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ if ret != 0:
+ raise DevtoolError('Deploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ logger.info('Successfully deployed %s' % recipe_outdir)
+
+ files_list = []
+ for root, _, files in os.walk(recipe_outdir):
+ for filename in files:
+ filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
+ files_list.append(os.path.join(destdir, filename))
+
+ return 0
+
+def undeploy(args, config, basepath, workspace):
+ """Entry point for the devtool 'undeploy' subcommand"""
+ if args.all and args.recipename:
+ raise argparse_oe.ArgumentUsageError('Cannot specify -a/--all with a recipe name', 'undeploy-target')
+ elif not args.recipename and not args.all:
+ raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target')
+
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
+
+ args.target = args.target.split(':')[0]
+
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_undeploy.sh'
+ shellscript = _prepare_remote_script(deploy=False, dryrun=args.dry_run, undeployall=args.all)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Copy it to the target
+ ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = subprocess.call('ssh %s %s \'sh %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename), shell=True)
+ if ret != 0:
+ raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ if not args.all and not args.dry_run:
+ logger.info('Successfully undeployed %s' % args.recipename)
+ return 0
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the deploy plugin"""
+ parser_deploy = subparsers.add_parser('deploy-target',
+ help='Deploy recipe output files to live target machine',
+ description='Deploys a recipe\'s build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have been installed on the target beforehand.',
+ group='testbuild')
+ parser_deploy.add_argument('recipename', help='Recipe to deploy')
+ parser_deploy.add_argument('target', help='Live target machine running an ssh server: user@hostname[:destdir]')
+ parser_deploy.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_deploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
+ parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
+ parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_deploy.set_defaults(func=deploy)
+
+ parser_undeploy = subparsers.add_parser('undeploy-target',
+ help='Undeploy recipe output files in live target machine',
+ description='Un-deploys recipe output files previously deployed to a live target machine by devtool deploy-target.',
+ group='testbuild')
+ parser_undeploy.add_argument('recipename', help='Recipe to undeploy (if not using -a/--all)', nargs='?')
+ parser_undeploy.add_argument('target', help='Live target machine running an ssh server: user@hostname')
+ parser_undeploy.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
+ parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_undeploy.set_defaults(func=undeploy)
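For reference, a minimal sketch (not part of the patch) of the list-file format that deploy() writes and the generated remote script reads as $3: the first line is the total size in KiB, followed by one "<target path> <size in KiB>" entry per file:

    filelist = [('/usr/bin/hello', 56), ('/usr/lib/libhello.so.1', 178)]   # hypothetical
    ftotalsize = sum(size for _, size in filelist)
    with open('devtool_deploy.list', 'w') as f:
        f.write('%d\n' % ftotalsize)
        for fpath, fsize in filelist:
            f.write('%s %d\n' % (fpath, fsize))
    # The remote script compares the total against 'df -P' output, preserves any
    # existing files, then unpacks the tar stream piped over ssh and records the
    # extracted paths in /.devtool/<recipe>.list for undeploy-target to use later.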
diff --git a/scripts/lib/devtool/package.py b/scripts/lib/devtool/package.py
new file mode 100644
index 0000000..afb5809
--- /dev/null
+++ b/scripts/lib/devtool/package.py
@@ -0,0 +1,62 @@
+# Development tool - package command plugin
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool plugin containing the package subcommands"""
+
+import os
+import subprocess
+import logging
+from bb.process import ExecutionError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def package(args, config, basepath, workspace):
+ """Entry point for the devtool 'package' subcommand"""
+ check_workspace_recipe(workspace, args.recipename)
+
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ tinfoil.prepare(config_only=True)
+
+ image_pkgtype = config.get('Package', 'image_pkgtype', '')
+ if not image_pkgtype:
+ image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE', True)
+
+ deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper(), True)
+ finally:
+ tinfoil.shutdown()
+
+ package_task = config.get('Package', 'package_task', 'package_write_%s' % image_pkgtype)
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c %s %s' % (package_task, args.recipename), watch=True)
+    except ExecutionError as e:
+ # We've already seen the output since watch=True, so just ensure we return something to the user
+ return e.exitcode
+
+ logger.info('Your packages are in %s' % deploy_dir_pkg)
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the package plugin"""
+ if context.fixed_setup:
+ parser_package = subparsers.add_parser('package',
+ help='Build packages for a recipe',
+ description='Builds packages for a recipe\'s output files',
+ group='testbuild', order=-5)
+ parser_package.add_argument('recipename', help='Recipe to package')
+ parser_package.set_defaults(func=package)
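A short sketch (not part of the patch) of how the package task and output directory are derived above, assuming an rpm-based configuration:

    image_pkgtype = 'rpm'                                 # e.g. from IMAGE_PKGTYPE
    package_task = 'package_write_%s' % image_pkgtype     # -> 'package_write_rpm'
    deploy_var = 'DEPLOY_DIR_%s' % image_pkgtype.upper()  # -> 'DEPLOY_DIR_RPM'
    # 'devtool package foo' then effectively runs: bitbake -c package_write_rpm foo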
diff --git a/scripts/lib/devtool/runqemu.py b/scripts/lib/devtool/runqemu.py
new file mode 100644
index 0000000..daee7fb
--- /dev/null
+++ b/scripts/lib/devtool/runqemu.py
@@ -0,0 +1,65 @@
+# Development tool - runqemu command plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool runqemu plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import glob
+from devtool import exec_build_env_command, setup_tinfoil, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def runqemu(args, config, basepath, workspace):
+ """Entry point for the devtool 'runqemu' subcommand"""
+
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ machine = tinfoil.config_data.getVar('MACHINE', True)
+ bindir_native = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
+ tinfoil.shutdown()
+
+ if not glob.glob(os.path.join(bindir_native, 'qemu-system-*')):
+ raise DevtoolError('QEMU is not available within this SDK')
+
+ imagename = args.imagename
+ if not imagename:
+ sdk_targets = config.get('SDK', 'sdk_targets', '').split()
+ if sdk_targets:
+ imagename = sdk_targets[0]
+ if not imagename:
+ raise DevtoolError('Unable to determine image name to run, please specify one')
+
+ try:
+ exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True)
+ except bb.process.ExecutionError as e:
+ # We've already seen the output since watch=True, so just ensure we return something to the user
+ return e.exitcode
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ if context.fixed_setup:
+ parser_runqemu = subparsers.add_parser('runqemu', help='Run QEMU on the specified image',
+ description='Runs QEMU to boot the specified image',
+ group='testbuild', order=-20)
+ parser_runqemu.add_argument('imagename', help='Name of built image to boot within QEMU', nargs='?')
+ parser_runqemu.add_argument('args', help='Any remaining arguments are passed to the runqemu script (pass --help after imagename to see what these are)',
+ nargs=argparse.REMAINDER)
+ parser_runqemu.set_defaults(func=runqemu)
diff --git a/scripts/lib/devtool/sdk.py b/scripts/lib/devtool/sdk.py
new file mode 100644
index 0000000..46fd12b
--- /dev/null
+++ b/scripts/lib/devtool/sdk.py
@@ -0,0 +1,366 @@
+# Development tool - sdk-update command plugin
+#
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import subprocess
+import logging
+import glob
+import shutil
+import errno
+import sys
+import tempfile
+import re
+import bb
+from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def parse_locked_sigs(sigfile_path):
+ """Return <pn:task>:<hash> dictionary"""
+ sig_dict = {}
+ with open(sigfile_path) as f:
+ lines = f.readlines()
+ for line in lines:
+ if ':' in line:
+ taskkey, _, hashval = line.rpartition(':')
+ sig_dict[taskkey.strip()] = hashval.split()[0]
+ return sig_dict
+
+def generate_update_dict(sigfile_new, sigfile_old):
+ """Return a dict containing <pn:task>:<hash> which indicates what need to be updated"""
+ update_dict = {}
+ sigdict_new = parse_locked_sigs(sigfile_new)
+ sigdict_old = parse_locked_sigs(sigfile_old)
+ for k in sigdict_new:
+ if k not in sigdict_old:
+ update_dict[k] = sigdict_new[k]
+ continue
+ if sigdict_new[k] != sigdict_old[k]:
+ update_dict[k] = sigdict_new[k]
+ continue
+ return update_dict
+
+def get_sstate_objects(update_dict, sstate_dir):
+ """Return a list containing sstate objects which are to be installed"""
+ sstate_objects = []
+ for k in update_dict:
+ files = set()
+ hashval = update_dict[k]
+ p = sstate_dir + '/' + hashval[:2] + '/*' + hashval + '*.tgz'
+ files |= set(glob.glob(p))
+ p = sstate_dir + '/*/' + hashval[:2] + '/*' + hashval + '*.tgz'
+ files |= set(glob.glob(p))
+ files = list(files)
+ if len(files) == 1:
+ sstate_objects.extend(files)
+ elif len(files) > 1:
+ logger.error("More than one matching sstate object found for %s" % hashval)
+
+ return sstate_objects
+
+def mkdir(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+def install_sstate_objects(sstate_objects, src_sdk, dest_sdk):
+ """Install sstate objects into destination SDK"""
+ sstate_dir = os.path.join(dest_sdk, 'sstate-cache')
+ if not os.path.exists(sstate_dir):
+ logger.error("Missing sstate-cache directory in %s, it might not be an extensible SDK." % dest_sdk)
+ raise
+ for sb in sstate_objects:
+ dst = sb.replace(src_sdk, dest_sdk)
+ destdir = os.path.dirname(dst)
+ mkdir(destdir)
+ logger.debug("Copying %s to %s" % (sb, dst))
+ shutil.copy(sb, dst)
+
+def check_manifest(fn, basepath):
+ import bb.utils
+ changedfiles = []
+ with open(fn, 'r') as f:
+ for line in f:
+ splitline = line.split()
+ if len(splitline) > 1:
+ chksum = splitline[0]
+ fpath = splitline[1]
+ curr_chksum = bb.utils.sha256_file(os.path.join(basepath, fpath))
+ if chksum != curr_chksum:
+ logger.debug('File %s changed: old csum = %s, new = %s' % (os.path.join(basepath, fpath), curr_chksum, chksum))
+ changedfiles.append(fpath)
+ return changedfiles
+
+def sdk_update(args, config, basepath, workspace):
+ # Fetch locked-sigs.inc file from remote/local destination
+ updateserver = args.updateserver
+ if not updateserver:
+ updateserver = config.get('SDK', 'updateserver', '')
+ logger.debug("updateserver: %s" % updateserver)
+
+ # Make sure we are using sdk-update from within SDK
+ logger.debug("basepath = %s" % basepath)
+ old_locked_sig_file_path = os.path.join(basepath, 'conf/locked-sigs.inc')
+ if not os.path.exists(old_locked_sig_file_path):
+ logger.error("Not using devtool's sdk-update command from within an extensible SDK. Please specify correct basepath via --basepath option")
+ return -1
+ else:
+ logger.debug("Found conf/locked-sigs.inc in %s" % basepath)
+
+ if ':' in updateserver:
+ is_remote = True
+ else:
+ is_remote = False
+
+ layers_dir = os.path.join(basepath, 'layers')
+ conf_dir = os.path.join(basepath, 'conf')
+
+ # Grab variable values
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR', True)
+ sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS', True)
+ site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION', True)
+ finally:
+ tinfoil.shutdown()
+
+ if not is_remote:
+ # devtool sdk-update /local/path/to/latest/sdk
+ new_locked_sig_file_path = os.path.join(updateserver, 'conf/locked-sigs.inc')
+ if not os.path.exists(new_locked_sig_file_path):
+ logger.error("%s doesn't exist or is not an extensible SDK" % updateserver)
+ return -1
+ else:
+ logger.debug("Found conf/locked-sigs.inc in %s" % updateserver)
+ update_dict = generate_update_dict(new_locked_sig_file_path, old_locked_sig_file_path)
+ logger.debug("update_dict = %s" % update_dict)
+ newsdk_path = updateserver
+ sstate_dir = os.path.join(newsdk_path, 'sstate-cache')
+ if not os.path.exists(sstate_dir):
+ logger.error("sstate-cache directory not found under %s" % newsdk_path)
+ return 1
+ sstate_objects = get_sstate_objects(update_dict, sstate_dir)
+ logger.debug("sstate_objects = %s" % sstate_objects)
+ if len(sstate_objects) == 0:
+ logger.info("No need to update.")
+ return 0
+ logger.info("Installing sstate objects into %s", basepath)
+ install_sstate_objects(sstate_objects, updateserver.rstrip('/'), basepath)
+ logger.info("Updating configuration files")
+ new_conf_dir = os.path.join(updateserver, 'conf')
+ shutil.rmtree(conf_dir)
+ shutil.copytree(new_conf_dir, conf_dir)
+ logger.info("Updating layers")
+ new_layers_dir = os.path.join(updateserver, 'layers')
+ shutil.rmtree(layers_dir)
+ ret = subprocess.call("cp -a %s %s" % (new_layers_dir, layers_dir), shell=True)
+ if ret != 0:
+ logger.error("Copying %s to %s failed" % (new_layers_dir, layers_dir))
+ return ret
+ else:
+ # devtool sdk-update http://myhost/sdk
+ tmpsdk_dir = tempfile.mkdtemp()
+ try:
+ os.makedirs(os.path.join(tmpsdk_dir, 'conf'))
+ new_locked_sig_file_path = os.path.join(tmpsdk_dir, 'conf', 'locked-sigs.inc')
+ # Fetch manifest from server
+ tmpmanifest = os.path.join(tmpsdk_dir, 'conf', 'sdk-conf-manifest')
+ ret = subprocess.call("wget -q -O %s %s/conf/sdk-conf-manifest" % (tmpmanifest, updateserver), shell=True)
+ changedfiles = check_manifest(tmpmanifest, basepath)
+ if not changedfiles:
+ logger.info("Already up-to-date")
+ return 0
+ # Update metadata
+ logger.debug("Updating metadata via git ...")
+            # Check the status before doing a fetch and reset
+            if os.path.exists(os.path.join(basepath, 'layers/.git')):
+                out = subprocess.check_output("git status --porcelain", shell=True, cwd=layers_dir)
+                if not out:
+                    ret = subprocess.call("git fetch --all; git reset --hard", shell=True, cwd=layers_dir)
+                else:
+                    logger.error("Failed to update metadata as there have been changes made to it. Aborting.")
+                    logger.error("Changed files:\n%s" % out)
+ return -1
+ else:
+ ret = -1
+ if ret != 0:
+ ret = subprocess.call("git clone %s/layers/.git" % updateserver, shell=True, cwd=tmpsdk_dir)
+ if ret != 0:
+ logger.error("Updating metadata via git failed")
+ return ret
+ logger.debug("Updating conf files ...")
+ for changedfile in changedfiles:
+ ret = subprocess.call("wget -q -O %s %s/%s" % (changedfile, updateserver, changedfile), shell=True, cwd=tmpsdk_dir)
+ if ret != 0:
+ logger.error("Updating %s failed" % changedfile)
+ return ret
+
+ # Check if UNINATIVE_CHECKSUM changed
+ uninative = False
+ if 'conf/local.conf' in changedfiles:
+ def read_uninative_checksums(fn):
+ chksumitems = []
+ with open(fn, 'r') as f:
+ for line in f:
+ if line.startswith('UNINATIVE_CHECKSUM'):
+ splitline = re.split(r'[\[\]"\']', line)
+ if len(splitline) > 3:
+ chksumitems.append((splitline[1], splitline[3]))
+ return chksumitems
+
+ oldsums = read_uninative_checksums(os.path.join(basepath, 'conf/local.conf'))
+ newsums = read_uninative_checksums(os.path.join(tmpsdk_dir, 'conf/local.conf'))
+ if oldsums != newsums:
+ uninative = True
+ for buildarch, chksum in newsums:
+ uninative_file = os.path.join('downloads', 'uninative', chksum, '%s-nativesdk-libc.tar.bz2' % buildarch)
+ mkdir(os.path.join(tmpsdk_dir, os.path.dirname(uninative_file)))
+ ret = subprocess.call("wget -q -O %s %s/%s" % (uninative_file, updateserver, uninative_file), shell=True, cwd=tmpsdk_dir)
+
+ # Ok, all is well at this point - move everything over
+ tmplayers_dir = os.path.join(tmpsdk_dir, 'layers')
+ if os.path.exists(tmplayers_dir):
+ shutil.rmtree(layers_dir)
+ shutil.move(tmplayers_dir, layers_dir)
+ for changedfile in changedfiles:
+ destfile = os.path.join(basepath, changedfile)
+ os.remove(destfile)
+ shutil.move(os.path.join(tmpsdk_dir, changedfile), destfile)
+ os.remove(os.path.join(conf_dir, 'sdk-conf-manifest'))
+ shutil.move(tmpmanifest, conf_dir)
+ if uninative:
+ shutil.rmtree(os.path.join(basepath, 'downloads', 'uninative'))
+ shutil.move(os.path.join(tmpsdk_dir, 'downloads', 'uninative'), os.path.join(basepath, 'downloads'))
+
+ if not sstate_mirrors:
+ with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
+ f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
+ f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
+ finally:
+ shutil.rmtree(tmpsdk_dir)
+
+ if not args.skip_prepare:
+ # Find all potentially updateable tasks
+ sdk_update_targets = []
+ tasks = ['do_populate_sysroot', 'do_packagedata']
+ for root, _, files in os.walk(stamps_dir):
+ for fn in files:
+ if not '.sigdata.' in fn:
+ for task in tasks:
+ if '.%s.' % task in fn or '.%s_setscene.' % task in fn:
+ sdk_update_targets.append('%s:%s' % (os.path.basename(root), task))
+ # Run bitbake command for the whole SDK
+ logger.info("Preparing build system... (This may take some time.)")
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake --setscene-only %s' % ' '.join(sdk_update_targets), stderr=subprocess.STDOUT)
+ output, _ = exec_build_env_command(config.init_path, basepath, 'bitbake -n %s' % ' '.join(sdk_update_targets), stderr=subprocess.STDOUT)
+ runlines = []
+ for line in output.splitlines():
+ if 'Running task ' in line:
+ runlines.append(line)
+ if runlines:
+ logger.error('Unexecuted tasks found in preparation log:\n %s' % '\n '.join(runlines))
+ return -1
+ except bb.process.ExecutionError as e:
+ logger.error('Preparation failed:\n%s' % e.stdout)
+ return -1
+ return 0
+
+def sdk_install(args, config, basepath, workspace):
+ """Entry point for the devtool sdk-install command"""
+
+ import oe.recipeutils
+ import bb.process
+
+ for recipe in args.recipename:
+ if recipe in workspace:
+ raise DevtoolError('recipe %s is a recipe in your workspace' % recipe)
+
+ tasks = ['do_populate_sysroot', 'do_packagedata']
+ stampprefixes = {}
+ def checkstamp(recipe):
+ stampprefix = stampprefixes[recipe]
+ stamps = glob.glob(stampprefix + '*')
+ for stamp in stamps:
+ if '.sigdata.' not in stamp and stamp.startswith((stampprefix + '.', stampprefix + '_setscene.')):
+ return True
+ else:
+ return False
+
+ install_recipes = []
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ for recipe in args.recipename:
+ rd = parse_recipe(config, tinfoil, recipe, True)
+ if not rd:
+ return 1
+ stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP', True), tasks[0])
+ if checkstamp(recipe):
+ logger.info('%s is already installed' % recipe)
+ else:
+ install_recipes.append(recipe)
+ finally:
+ tinfoil.shutdown()
+
+ if install_recipes:
+ logger.info('Installing %s...' % ', '.join(install_recipes))
+ install_tasks = []
+ for recipe in install_recipes:
+ for task in tasks:
+ if recipe.endswith('-native') and 'package' in task:
+ continue
+ install_tasks.append('%s:%s' % (recipe, task))
+ options = ''
+ if not args.allow_build:
+ options += ' --setscene-only'
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake %s %s' % (options, ' '.join(install_tasks)), watch=True)
+ except bb.process.ExecutionError as e:
+ raise DevtoolError('Failed to install %s:\n%s' % (recipe, str(e)))
+ failed = False
+ for recipe in install_recipes:
+ if checkstamp(recipe):
+ logger.info('Successfully installed %s' % recipe)
+ else:
+            logger.error('Failed to install %s - unavailable' % recipe)
+            failed = True
+ if failed:
+ return 2
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the sdk plugin"""
+ if context.fixed_setup:
+ parser_sdk = subparsers.add_parser('sdk-update',
+ help='Update SDK components',
+ description='Updates installed SDK components from a remote server',
+ group='sdk')
+ updateserver = context.config.get('SDK', 'updateserver', '')
+ if updateserver:
+ parser_sdk.add_argument('updateserver', help='The update server to fetch latest SDK components from (default %s)' % updateserver, nargs='?')
+ else:
+ parser_sdk.add_argument('updateserver', help='The update server to fetch latest SDK components from')
+ parser_sdk.add_argument('--skip-prepare', action="store_true", help='Skip re-preparing the build system after updating (for debugging only)')
+ parser_sdk.set_defaults(func=sdk_update)
+
+ parser_sdk_install = subparsers.add_parser('sdk-install',
+ help='Install additional SDK components',
+ description='Installs additional recipe development files into the SDK. (You can use "devtool search" to find available recipes.)',
+ group='sdk')
+ parser_sdk_install.add_argument('recipename', help='Name of the recipe to install the development artifacts for', nargs='+')
+ parser_sdk_install.add_argument('-s', '--allow-build', help='Allow building requested item(s) from source', action='store_true')
+ parser_sdk_install.set_defaults(func=sdk_install)
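To illustrate the locked-sigs comparison driving sdk-update (a sketch, not part of the patch; the hashes are made up): parse_locked_sigs() keys each entry on everything before the last ':' in the line, and generate_update_dict() keeps entries that are new or whose hash changed:

    old = {'foo:do_populate_sysroot': 'aaaa', 'bar:do_packagedata': 'bbbb'}
    new = {'foo:do_populate_sysroot': 'cccc', 'bar:do_packagedata': 'bbbb',
           'baz:do_populate_sysroot': 'dddd'}
    update = dict((k, v) for k, v in new.items() if old.get(k) != v)
    # update == {'foo:do_populate_sysroot': 'cccc', 'baz:do_populate_sysroot': 'dddd'}
    # Matching sstate objects are then looked up under sstate-cache/<hash prefix>/
    # and copied into the SDK by install_sstate_objects().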
diff --git a/scripts/lib/devtool/search.py b/scripts/lib/devtool/search.py
new file mode 100644
index 0000000..b44bed7
--- /dev/null
+++ b/scripts/lib/devtool/search.py
@@ -0,0 +1,88 @@
+# Development tool - search command plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool search plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import re
+from devtool import setup_tinfoil, parse_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def search(args, config, basepath, workspace):
+ """Entry point for the devtool 'search' subcommand"""
+
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+ defsummary = tinfoil.config_data.getVar('SUMMARY', False) or ''
+
+ keyword_rc = re.compile(args.keyword)
+
+ for fn in os.listdir(pkgdata_dir):
+ pfn = os.path.join(pkgdata_dir, fn)
+ if not os.path.isfile(pfn):
+ continue
+
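+ # Top-level files in PKGDATA_DIR are named after recipes; try a match on
+ # the recipe name first, then fall back to the package names and the
+ # per-package metadata under the runtime/ directory.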
+ packages = []
+ match = False
+ if keyword_rc.search(fn):
+ match = True
+
+ if not match:
+ with open(pfn, 'r') as f:
+ for line in f:
+ if line.startswith('PACKAGES:'):
+ packages = line.split(':', 1)[1].strip().split()
+
+ for pkg in packages:
+ if keyword_rc.search(pkg):
+ match = True
+ break
+ if os.path.exists(os.path.join(pkgdata_dir, 'runtime', pkg + '.packaged')):
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
+ for line in f:
+ if ': ' in line:
+ splitline = line.split(':', 1)
+ key = splitline[0]
+ value = splitline[1].strip()
+ if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
+ if keyword_rc.search(value):
+ match = True
+ break
+
+ if match:
+ rd = parse_recipe(config, tinfoil, fn, True)
+ summary = rd.getVar('SUMMARY', True)
+ if summary == rd.expand(defsummary):
+ summary = ''
+ print("%s %s" % (fn.ljust(20), summary))
+ finally:
+ tinfoil.shutdown()
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ parser_search = subparsers.add_parser('search', help='Search available recipes',
+ description='Searches for available target recipes. Matches on recipe name, package name, description and installed files, and prints the recipe name on match.',
+ group='info')
+ parser_search.add_argument('keyword', help='Keyword to search for (regular expression syntax allowed)')
+ parser_search.set_defaults(func=search, no_workspace=True)
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
new file mode 100644
index 0000000..f414860
--- /dev/null
+++ b/scripts/lib/devtool/standard.py
@@ -0,0 +1,1452 @@
+# Development tool - standard commands plugin
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool standard plugins"""
+
+import os
+import sys
+import re
+import shutil
+import subprocess
+import tempfile
+import logging
+import argparse
+import argparse_oe
+import scriptutils
+import errno
+import glob
+from collections import OrderedDict
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, use_external_build, setup_git_repo, recipe_to_append, get_bbclassextend_targets, DevtoolError
+from devtool import parse_recipe
+
+logger = logging.getLogger('devtool')
+
+
+def add(args, config, basepath, workspace):
+ """Entry point for the devtool 'add' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ if not args.recipename and not args.srctree and not args.fetch and not args.fetchuri:
+ raise argparse_oe.ArgumentUsageError('At least one of recipename, srctree, fetchuri or -f/--fetch must be specified', 'add')
+
+ # These are positional arguments, but because we're nice, allow
+ # specifying e.g. source tree without name, or fetch URI without name or
+ # source tree (if we can detect that that is what the user meant)
+ if '://' in args.recipename:
+ if not args.fetchuri:
+ if args.fetch:
+ raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
+ args.fetchuri = args.recipename
+ args.recipename = ''
+ elif args.srctree and '://' in args.srctree:
+ if not args.fetchuri:
+ if args.fetch:
+ raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
+ args.fetchuri = args.srctree
+ args.srctree = ''
+ elif args.recipename and not args.srctree:
+ if os.sep in args.recipename:
+ args.srctree = args.recipename
+ args.recipename = None
+ elif os.path.isdir(args.recipename):
+ logger.warn('Ambiguous argument "%s" - assuming you mean it to be the recipe name' % args.recipename)
+
+ if args.fetch:
+ if args.fetchuri:
+ raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
+ else:
+ # FIXME should show a warning that -f/--fetch is deprecated here
+ args.fetchuri = args.fetch
+
+ if args.recipename:
+ if args.recipename in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ args.recipename)
+ reason = oe.recipeutils.validate_pn(args.recipename)
+ if reason:
+ raise DevtoolError(reason)
+
+ # FIXME this ought to be in validate_pn but we're using that in other contexts
+ if '/' in args.recipename:
+ raise DevtoolError('"/" is not a valid character in recipe names')
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ srctreeparent = None
+ tmpsrcdir = None
+ else:
+ srctree = None
+ srctreeparent = get_default_srctree(config)
+ bb.utils.mkdirhier(srctreeparent)
+ tmpsrcdir = tempfile.mkdtemp(prefix='devtoolsrc', dir=srctreeparent)
+
+ if srctree and os.path.exists(srctree):
+ if args.fetchuri:
+ if not os.path.isdir(srctree):
+ raise DevtoolError("Cannot fetch into source tree path %s as "
+ "it exists and is not a directory" %
+ srctree)
+ elif os.listdir(srctree):
+ raise DevtoolError("Cannot fetch into source tree path %s as "
+ "it already exists and is non-empty" %
+ srctree)
+ elif not args.fetchuri:
+ if args.srctree:
+ raise DevtoolError("Specified source tree %s could not be found" %
+ args.srctree)
+ elif srctree:
+ raise DevtoolError("No source tree exists at default path %s - "
+ "either create and populate this directory, "
+ "or specify a path to a source tree, or a "
+ "URI to fetch source from" % srctree)
+ else:
+ raise DevtoolError("You must either specify a source tree "
+ "or a URI to fetch source from")
+
+ if args.version:
+ if '_' in args.version or ' ' in args.version:
+ raise DevtoolError('Invalid version string "%s"' % args.version)
+
+ if args.color == 'auto' and sys.stdout.isatty():
+ color = 'always'
+ else:
+ color = args.color
+ extracmdopts = ''
+ if args.fetchuri:
+ source = args.fetchuri
+ if srctree:
+ extracmdopts += ' -x %s' % srctree
+ else:
+ extracmdopts += ' -x %s' % tmpsrcdir
+ else:
+ source = srctree
+ if args.recipename:
+ extracmdopts += ' -N %s' % args.recipename
+ if args.version:
+ extracmdopts += ' -V %s' % args.version
+ if args.binary:
+ extracmdopts += ' -b'
+ if args.also_native:
+ extracmdopts += ' --also-native'
+ if args.src_subdir:
+ extracmdopts += ' --src-subdir "%s"' % args.src_subdir
+
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ try:
+ stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create -o %s "%s" %s' % (color, tempdir, source, extracmdopts))
+ except bb.process.ExecutionError as e:
+ if e.exitcode == 15:
+ raise DevtoolError('Could not auto-determine recipe name, please specify it on the command line')
+ else:
+ raise DevtoolError('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+
+ recipes = glob.glob(os.path.join(tempdir, '*.bb'))
+ if recipes:
+ recipename = os.path.splitext(os.path.basename(recipes[0]))[0].split('_')[0]
+ if recipename in workspace:
+ raise DevtoolError('A recipe with the same name as the one being created (%s) already exists in your workspace' % recipename)
+ recipedir = os.path.join(config.workspace_path, 'recipes', recipename)
+ bb.utils.mkdirhier(recipedir)
+ recipefile = os.path.join(recipedir, os.path.basename(recipes[0]))
+ appendfile = recipe_to_append(recipefile, config)
+ if os.path.exists(appendfile):
+ # This shouldn't be possible, but just in case
+ raise DevtoolError('A recipe with the same name as the one being created already exists in your workspace')
+ if os.path.exists(recipefile):
+ raise DevtoolError('A recipe file %s already exists in your workspace; this shouldn\'t be there - please delete it before continuing' % recipefile)
+ if tmpsrcdir:
+ srctree = os.path.join(srctreeparent, recipename)
+ if os.path.exists(tmpsrcdir):
+ if os.path.exists(srctree):
+ if os.path.isdir(srctree):
+ try:
+ os.rmdir(srctree)
+ except OSError as e:
+ if e.errno == errno.ENOTEMPTY:
+ raise DevtoolError('Source tree path %s already exists and is not empty' % srctree)
+ else:
+ raise
+ else:
+ raise DevtoolError('Source tree path %s already exists and is not a directory' % srctree)
+ logger.info('Using default source tree path %s' % srctree)
+ shutil.move(tmpsrcdir, srctree)
+ else:
+ raise DevtoolError('Couldn\'t find source tree created by recipetool')
+ bb.utils.mkdirhier(recipedir)
+ shutil.move(recipes[0], recipefile)
+ # Move any additional files created by recipetool
+ for fn in os.listdir(tempdir):
+ shutil.move(os.path.join(tempdir, fn), recipedir)
+ else:
+ raise DevtoolError('Command \'recipetool create\' did not create any recipe file:\n%s' % stdout)
+ attic_recipe = os.path.join(config.workspace_path, 'attic', recipename, os.path.basename(recipefile))
+ if os.path.exists(attic_recipe):
+ logger.warn('A modified recipe from a previous invocation exists in %s - you may wish to move this over the top of the new recipe if you had changes in it that you want to continue with' % attic_recipe)
+ finally:
+ if tmpsrcdir and os.path.exists(tmpsrcdir):
+ shutil.rmtree(tmpsrcdir)
+ shutil.rmtree(tempdir)
+
+ for fn in os.listdir(recipedir):
+ _add_md5(config, recipename, os.path.join(recipedir, fn))
+
+ if args.fetchuri and not args.no_git:
+ setup_git_repo(srctree, args.version, 'devtool')
+
+ initial_rev = None
+ if os.path.exists(os.path.join(srctree, '.git')):
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ initial_rev = stdout.rstrip()
+
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ rd = oe.recipeutils.parse_recipe(recipefile, None, tinfoil.config_data)
+ if not rd:
+ return 1
+
+ if args.src_subdir:
+ srctree = os.path.join(srctree, args.src_subdir)
+
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as f:
+ f.write('inherit externalsrc\n')
+ f.write('EXTERNALSRC = "%s"\n' % srctree)
+
+ b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
+ if initial_rev:
+ f.write('\n# initial_rev: %s\n' % initial_rev)
+
+ if args.binary:
+ f.write('do_install_append() {\n')
+ f.write(' rm -rf ${D}/.git\n')
+ f.write(' rm -f ${D}/singletask.lock\n')
+ f.write('}\n')
+
+ if bb.data.inherits_class('npm', rd):
+ f.write('do_install_append() {\n')
+ f.write(' # Remove files added to source dir by devtool/externalsrc\n')
+ f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
+ f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
+ f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
+ f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
+ f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
+ f.write(' done\n')
+ f.write('}\n')
+
+ _add_md5(config, recipename, appendfile)
+
+ logger.info('Recipe %s has been automatically created; further editing may be required to make it fully functional' % recipefile)
+
+ tinfoil.shutdown()
+
+ return 0
+
+
+def _check_compatible_recipe(pn, d):
+ """Check if the recipe is supported by devtool"""
+ if pn == 'perf':
+ raise DevtoolError("The perf recipe does not actually check out "
+ "source and thus cannot be supported by this tool")
+
+ if pn in ['kernel-devsrc', 'package-index'] or pn.startswith('gcc-source'):
+ raise DevtoolError("The %s recipe is not supported by this tool" % pn)
+
+ if bb.data.inherits_class('image', d):
+ raise DevtoolError("The %s recipe is an image, and therefore is not "
+ "supported by this tool" % pn)
+
+ if bb.data.inherits_class('populate_sdk', d):
+ raise DevtoolError("The %s recipe is an SDK, and therefore is not "
+ "supported by this tool" % pn)
+
+ if bb.data.inherits_class('packagegroup', d):
+ raise DevtoolError("The %s recipe is a packagegroup, and therefore is "
+ "not supported by this tool" % pn)
+
+ if bb.data.inherits_class('meta', d):
+ raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
+ "not supported by this tool" % pn)
+
+ if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC', True):
+ raise DevtoolError("externalsrc is currently enabled for the %s "
+ "recipe. This prevents the normal do_patch task "
+ "from working. You will need to disable this "
+ "first." % pn)
+
+def _move_file(src, dst):
+ """Move a file. Creates all the directory components of destination path."""
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.move(src, dst)
+
+def _git_ls_tree(repodir, treeish='HEAD', recursive=False):
+ """List contents of a git treeish"""
+ import bb
+ cmd = ['git', 'ls-tree', '-z', treeish]
+ if recursive:
+ cmd.append('-r')
+ out, _ = bb.process.run(cmd, cwd=repodir)
+ ret = {}
+ for line in out.split('\0'):
+ if line:
+ split = line.split(None, 4)
+ ret[split[3]] = split[0:3]
+ return ret
+
+def _git_exclude_path(srctree, path):
+ """Return pathspec (list of paths) that excludes certain path"""
+ # NOTE: "Filtering out" files/paths in this way is not entirely reliable -
+ # we don't catch files that are deleted, for example. A more reliable way
+ # to implement this would be to use "negative pathspecs" which were
+ # introduced in Git v1.9.0. Revisit this when/if the required Git version
+ # becomes greater than that.
+ path = os.path.normpath(path)
+ recurse = True if len(path.split(os.path.sep)) > 1 else False
+ git_files = _git_ls_tree(srctree, 'HEAD', recurse).keys()
+ if path in git_files:
+ git_files.remove(path)
+ return git_files
+ else:
+ return ['.']
+
+def _ls_tree(directory):
+ """Recursive listing of files in a directory"""
+ ret = []
+ for root, dirs, files in os.walk(directory):
+ ret.extend([os.path.relpath(os.path.join(root, fname), directory) for
+ fname in files])
+ return ret
+
+
+def extract(args, config, basepath, workspace):
+ """Entry point for the devtool 'extract' subcommand"""
+ import bb
+
+ tinfoil = _prep_extract_operation(config, basepath, args.recipename)
+ if not tinfoil:
+ # Error already shown
+ return 1
+
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ srctree = os.path.abspath(args.srctree)
+ initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd)
+ logger.info('Source tree extracted to %s' % srctree)
+
+ if initial_rev:
+ return 0
+ else:
+ return 1
+
+def sync(args, config, basepath, workspace):
+ """Entry point for the devtool 'sync' subcommand"""
+ import bb
+
+ tinfoil = _prep_extract_operation(config, basepath, args.recipename)
+ if not tinfoil:
+ # Error already shown
+ return 1
+
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ srctree = os.path.abspath(args.srctree)
+ initial_rev = _extract_source(srctree, args.keep_temp, args.branch, True, rd)
+ logger.info('Source tree %s synchronized' % srctree)
+
+ if initial_rev:
+ return 0
+ else:
+ return 1
+
+class BbTaskExecutor(object):
+ """Class for executing bitbake tasks for a recipe
+
+ FIXME: This is very awkward. Unfortunately it's not currently easy to
+ properly execute tasks outside of bitbake itself; until that changes, this
+ has to suffice if we are to handle e.g. linux-yocto's extra tasks.
+ """
+
+ def __init__(self, rdata):
+ self.rdata = rdata
+ self.executed = []
+
+ def exec_func(self, func, report):
+ """Run bitbake task function"""
+ if not func in self.executed:
+ deps = self.rdata.getVarFlag(func, 'deps', False)
+ if deps:
+ for taskdepfunc in deps:
+ self.exec_func(taskdepfunc, True)
+ if report:
+ logger.info('Executing %s...' % func)
+ fn = self.rdata.getVar('FILE', True)
+ localdata = bb.build._task_data(fn, func, self.rdata)
+ try:
+ bb.build.exec_func(func, localdata)
+ except bb.build.FuncFailed as e:
+ raise DevtoolError(str(e))
+ self.executed.append(func)
+
+
+class PatchTaskExecutor(BbTaskExecutor):
+ def __init__(self, rdata):
+ self.check_git = False
+ super(PatchTaskExecutor, self).__init__(rdata)
+
+ def exec_func(self, func, report):
+ from oe.patch import GitApplyTree
+ srcsubdir = self.rdata.getVar('S', True)
+ haspatches = False
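+ # If the source already contains a 'patches' directory, record whether it
+ # has content: an empty one is removed up front, and a previously
+ # populated one is restored from git after the task has run.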
+ if func == 'do_patch':
+ patchdir = os.path.join(srcsubdir, 'patches')
+ if os.path.exists(patchdir):
+ if os.listdir(patchdir):
+ haspatches = True
+ else:
+ os.rmdir(patchdir)
+
+ super(PatchTaskExecutor, self).exec_func(func, report)
+ if self.check_git and os.path.exists(srcsubdir):
+ if func == 'do_patch':
+ if os.path.exists(patchdir):
+ shutil.rmtree(patchdir)
+ if haspatches:
+ stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
+ if stdout:
+ bb.process.run('git checkout patches', cwd=srcsubdir)
+
+ stdout, _ = bb.process.run('git status --porcelain', cwd=srcsubdir)
+ if stdout:
+ bb.process.run('git add .; git commit -a -m "Committing changes from %s\n\n%s"' % (func, GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
+
+
+def _prep_extract_operation(config, basepath, recipename, tinfoil=None):
+ """HACK: Ugly workaround for making sure that requirements are met when
+ trying to extract a package. Returns the tinfoil instance to be used."""
+ if not tinfoil:
+ tinfoil = setup_tinfoil(basepath=basepath)
+
+ rd = parse_recipe(config, tinfoil, recipename, True)
+ if not rd:
+ return None
+
+ if bb.data.inherits_class('kernel-yocto', rd):
+ tinfoil.shutdown()
+ try:
+ stdout, _ = exec_build_env_command(config.init_path, basepath,
+ 'bitbake kern-tools-native')
+ tinfoil = setup_tinfoil(basepath=basepath)
+ except bb.process.ExecutionError as err:
+ raise DevtoolError("Failed to build kern-tools-native:\n%s" %
+ err.stdout)
+ return tinfoil
+
+
+def _extract_source(srctree, keep_temp, devbranch, sync, d):
+ """Extract sources of a recipe"""
+ import bb.event
+ import oe.recipeutils
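+ # Overall flow: run do_fetch/do_unpack/do_patch in a throwaway WORKDIR,
+ # set up a git repository on the development branch in the unpacked
+ # source, then move (or, when syncing, fetch) the result into srctree.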
+
+ def eventfilter(name, handler, event, d):
+ """Bitbake event filter for devtool extract operation"""
+ if name == 'base_eventhandler':
+ return True
+ else:
+ return False
+
+ if hasattr(bb.event, 'set_eventfilter'):
+ bb.event.set_eventfilter(eventfilter)
+
+ pn = d.getVar('PN', True)
+
+ _check_compatible_recipe(pn, d)
+
+ if sync:
+ if not os.path.exists(srctree):
+ raise DevtoolError("output path %s does not exist" % srctree)
+ else:
+ if os.path.exists(srctree):
+ if not os.path.isdir(srctree):
+ raise DevtoolError("output path %s exists and is not a directory" %
+ srctree)
+ elif os.listdir(srctree):
+ raise DevtoolError("output path %s already exists and is "
+ "non-empty" % srctree)
+
+ if 'noexec' in (d.getVarFlags('do_unpack', False) or []):
+ raise DevtoolError("The %s recipe has do_unpack disabled, unable to "
+ "extract source" % pn)
+
+ if not sync:
+ # Prepare for shutil.move later on
+ bb.utils.mkdirhier(srctree)
+ os.rmdir(srctree)
+
+ # We don't want notes to be printed; they are too verbose
+ origlevel = bb.logger.getEffectiveLevel()
+ if logger.getEffectiveLevel() > logging.DEBUG:
+ bb.logger.setLevel(logging.WARNING)
+
+ initial_rev = None
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ crd = d.createCopy()
+ # Make a subdir so we guard against WORKDIR==S
+ workdir = os.path.join(tempdir, 'workdir')
+ crd.setVar('WORKDIR', workdir)
+ crd.setVar('T', os.path.join(tempdir, 'temp'))
+ if not crd.getVar('S', True).startswith(workdir):
+ # Usually a shared workdir recipe (kernel, gcc)
+ # Try to set a reasonable default
+ if bb.data.inherits_class('kernel', d):
+ crd.setVar('S', '${WORKDIR}/source')
+ else:
+ crd.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S', True)))
+ if bb.data.inherits_class('kernel', d):
+ # We don't want to move the source to STAGING_KERNEL_DIR here
+ crd.setVar('STAGING_KERNEL_DIR', '${S}')
+
+ task_executor = PatchTaskExecutor(crd)
+
+ crd.setVar('EXTERNALSRC_forcevariable', '')
+
+ logger.info('Fetching %s...' % pn)
+ task_executor.exec_func('do_fetch', False)
+ logger.info('Unpacking...')
+ task_executor.exec_func('do_unpack', False)
+ if bb.data.inherits_class('kernel-yocto', d):
+ # Extra step for kernel to populate the source directory
+ logger.info('Doing kernel checkout...')
+ task_executor.exec_func('do_kernel_checkout', False)
+ srcsubdir = crd.getVar('S', True)
+
+ task_executor.check_git = True
+
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in
+ oe.recipeutils.get_recipe_patches(crd)]
+ local_files = oe.recipeutils.get_recipe_local_files(crd)
+ local_files = [fname for fname in local_files if
+ os.path.exists(os.path.join(workdir, fname))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname),
+ os.path.join(tempdir, 'oe-local-files', fname))
+ with open(os.path.join(tempdir, 'oe-local-files', '.gitignore'),
+ 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file '
+ 'if you want to commit the directory to Git\n*\n')
+
+ if srcsubdir == workdir:
+ # Find non-patch non-local sources that were "unpacked" to srctree
+ # directory
+ src_files = [fname for fname in _ls_tree(workdir) if
+ os.path.basename(fname) not in recipe_patches]
+ # Force separate S so that patch files can be left out from srctree
+ srcsubdir = tempfile.mkdtemp(dir=workdir)
+ crd.setVar('S', srcsubdir)
+ # Move source files to S
+ for path in src_files:
+ _move_file(os.path.join(workdir, path),
+ os.path.join(srcsubdir, path))
+ elif os.path.dirname(srcsubdir) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.join(workdir, os.path.relpath(srcsubdir, workdir).split(os.sep)[0])
+
+ scriptutils.git_convert_standalone_clone(srcsubdir)
+
+ # Make sure that srcsubdir exists
+ bb.utils.mkdirhier(srcsubdir)
+ if not os.path.exists(srcsubdir) or not os.listdir(srcsubdir):
+ logger.warning("no source unpacked to S, either the %s recipe "
+ "doesn't use any source or the correct source "
+ "directory could not be determined" % pn)
+
+ setup_git_repo(srcsubdir, crd.getVar('PV', True), devbranch)
+
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
+ initial_rev = stdout.rstrip()
+
+ crd.setVar('PATCHTOOL', 'git')
+
+ logger.info('Patching...')
+ task_executor.exec_func('do_patch', False)
+
+ bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
+
+ kconfig = None
+ if bb.data.inherits_class('kernel-yocto', d):
+ # Generate and store the kernel config
+ logger.info('Generating kernel config')
+ task_executor.exec_func('do_configure', False)
+ kconfig = os.path.join(crd.getVar('B', True), '.config')
+
+
+ tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
+ srctree_localdir = os.path.join(srctree, 'oe-local-files')
+
+ if sync:
+ bb.process.run('git fetch file://' + srcsubdir + ' ' + devbranch + ':' + devbranch, cwd=srctree)
+
+ # Move oe-local-files directory to srctree
+ # As oe-local-files is not part of the constructed git tree, removing
+ # it directly during synchronization might surprise the user. Instead,
+ # we move it to oe-local-files.bak and remind the user in the log message.
+ if os.path.exists(srctree_localdir + '.bak'):
+ shutil.rmtree(srctree_localdir + '.bak')
+
+ if os.path.exists(srctree_localdir):
+ logger.info('Backing up current local file directory %s' % srctree_localdir)
+ shutil.move(srctree_localdir, srctree_localdir + '.bak')
+
+ if os.path.exists(tempdir_localdir):
+ logger.info('Syncing local source files to srctree...')
+ shutil.copytree(tempdir_localdir, srctree_localdir)
+ else:
+ # Move oe-local-files directory to srctree
+ if os.path.exists(tempdir_localdir):
+ logger.info('Adding local source files to srctree...')
+ shutil.move(tempdir_localdir, srcsubdir)
+
+ shutil.move(srcsubdir, srctree)
+
+ if kconfig:
+ logger.info('Copying kernel config to srctree')
+ shutil.copy2(kconfig, srctree)
+
+ finally:
+ bb.logger.setLevel(origlevel)
+
+ if keep_temp:
+ logger.info('Preserving temporary directory %s' % tempdir)
+ else:
+ shutil.rmtree(tempdir)
+ return initial_rev
+
+def _add_md5(config, recipename, filename):
+ """Record checksum of a file (or recursively for a directory) to the md5-file of the workspace"""
+ import bb.utils
+
+ def addfile(fn):
+ md5 = bb.utils.md5_file(fn)
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'a') as f:
+ f.write('%s|%s|%s\n' % (recipename, os.path.relpath(fn, config.workspace_path), md5))
+
+ if os.path.isdir(filename):
+ for root, _, files in os.walk(filename):
+ for f in files:
+ addfile(os.path.join(root, f))
+ else:
+ addfile(filename)
+
+def _check_preserve(config, recipename):
+ """Check if a file was manually changed and needs to be saved in 'attic'
+ directory"""
+ import bb.utils
+ origfile = os.path.join(config.workspace_path, '.devtool_md5')
+ newfile = os.path.join(config.workspace_path, '.devtool_md5_new')
+ preservepath = os.path.join(config.workspace_path, 'attic', recipename)
+ with open(origfile, 'r') as f:
+ with open(newfile, 'w') as tf:
+ for line in f.readlines():
+ splitline = line.rstrip().split('|')
+ if splitline[0] == recipename:
+ removefile = os.path.join(config.workspace_path, splitline[1])
+ try:
+ md5 = bb.utils.md5_file(removefile)
+ except IOError as err:
+ if err.errno == 2:
+ # File no longer exists, skip it
+ continue
+ else:
+ raise
+ if splitline[2] != md5:
+ bb.utils.mkdirhier(preservepath)
+ preservefile = os.path.basename(removefile)
+ logger.warn('File %s modified since it was written, preserving in %s' % (preservefile, preservepath))
+ shutil.move(removefile, os.path.join(preservepath, preservefile))
+ else:
+ os.remove(removefile)
+ else:
+ tf.write(line)
+ os.rename(newfile, origfile)
+
+def modify(args, config, basepath, workspace):
+ """Entry point for the devtool 'modify' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ if args.recipename in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ args.recipename)
+
+ tinfoil = setup_tinfoil(basepath=basepath)
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ pn = rd.getVar('PN', True)
+ if pn != args.recipename:
+ logger.info('Mapping %s to %s' % (args.recipename, pn))
+ if pn in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ pn)
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ else:
+ srctree = get_default_srctree(config, pn)
+
+ if args.no_extract and not os.path.isdir(srctree):
+ raise DevtoolError("--no-extract specified and source path %s does "
+ "not exist or is not a directory" %
+ srctree)
+ if not args.no_extract:
+ tinfoil = _prep_extract_operation(config, basepath, pn, tinfoil)
+ if not tinfoil:
+ # Error already shown
+ return 1
+
+ recipefile = rd.getVar('FILE', True)
+ appendfile = recipe_to_append(recipefile, config, args.wildcard)
+ if os.path.exists(appendfile):
+ raise DevtoolError("Another variant of recipe %s is already in your "
+ "workspace (only one variant of a recipe can "
+ "currently be worked on at once)"
+ % pn)
+
+ _check_compatible_recipe(pn, rd)
+
+ initial_rev = None
+ commits = []
+ if not args.no_extract:
+ initial_rev = _extract_source(srctree, False, args.branch, False, rd)
+ if not initial_rev:
+ return 1
+ logger.info('Source tree extracted to %s' % srctree)
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
+ commits = stdout.split()
+ else:
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Check if it's a tree previously extracted by us
+ try:
+ (stdout, _) = bb.process.run('git branch --contains devtool-base', cwd=srctree)
+ except bb.process.ExecutionError:
+ stdout = ''
+ for line in stdout.splitlines():
+ if line.startswith('*'):
+ (stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
+ initial_rev = stdout.rstrip()
+ if not initial_rev:
+ # Otherwise, just grab the head revision
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ initial_rev = stdout.rstrip()
+
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(rd.getVar('S', True))
+ workdir = os.path.abspath(rd.getVar('WORKDIR', True))
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as f:
+ f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
+ # Local files can be modified/tracked in separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH_prepend := "%s:"\n' %
+ os.path.join(srctree, 'oe-local-files'))
+
+ f.write('\ninherit externalsrc\n')
+ f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
+ f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+
+ b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+
+ if bb.data.inherits_class('kernel', rd):
+ f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
+ 'do_fetch do_unpack do_patch do_kernel_configme do_kernel_configcheck"\n')
+ f.write('\ndo_configure_append() {\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ '}\n')
+ if initial_rev:
+ f.write('\n# initial_rev: %s\n' % initial_rev)
+ for commit in commits:
+ f.write('# commit: %s\n' % commit)
+
+ _add_md5(config, pn, appendfile)
+
+ logger.info('Recipe %s now set up to build from %s' % (pn, srctree))
+
+ tinfoil.shutdown()
+
+ return 0
+
+def _get_patchset_revs(srctree, recipe_path, initial_rev=None):
+ """Get initial and update rev of a recipe. These are the start point of the
+ whole patchset and start point for the patches to be re-generated/updated.
+ """
+ import bb
+
+ # Parse initial rev from recipe if not specified
+ commits = []
+ with open(recipe_path, 'r') as f:
+ for line in f:
+ if line.startswith('# initial_rev:'):
+ if not initial_rev:
+ initial_rev = line.split(':')[-1].strip()
+ elif line.startswith('# commit:'):
+ commits.append(line.split(':')[-1].strip())
+
+ update_rev = initial_rev
+ changed_revs = None
+ if initial_rev:
+ # Find first actually changed revision
+ stdout, _ = bb.process.run('git rev-list --reverse %s..HEAD' %
+ initial_rev, cwd=srctree)
+ newcommits = stdout.split()
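+ # Advance update_rev past commits that are unchanged since the recipe
+ # was set up, so only genuinely new or modified commits are turned into
+ # updated patches.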
+ for i in xrange(min(len(commits), len(newcommits))):
+ if newcommits[i] == commits[i]:
+ update_rev = commits[i]
+
+ try:
+ stdout, _ = bb.process.run('git cherry devtool-patched',
+ cwd=srctree)
+ except bb.process.ExecutionError as err:
+ stdout = None
+
+ if stdout is not None:
+ changed_revs = []
+ for line in stdout.splitlines():
+ if line.startswith('+ '):
+ rev = line.split()[1]
+ if rev in newcommits:
+ changed_revs.append(rev)
+
+ return initial_rev, update_rev, changed_revs
+
+def _remove_file_entries(srcuri, filelist):
+ """Remove file:// entries from SRC_URI"""
+ remaining = filelist[:]
+ entries = []
+ for fname in filelist:
+ basename = os.path.basename(fname)
+ for i in xrange(len(srcuri)):
+ if (srcuri[i].startswith('file://') and
+ os.path.basename(srcuri[i].split(';')[0]) == basename):
+ entries.append(srcuri[i])
+ remaining.remove(fname)
+ srcuri.pop(i)
+ break
+ return entries, remaining
+
+def _remove_source_files(args, files, destpath):
+ """Unlink existing patch files"""
+ for path in files:
+ if args.append:
+ if not destpath:
+ raise Exception('destpath should be set here')
+ path = os.path.join(destpath, os.path.basename(path))
+
+ if os.path.exists(path):
+ logger.info('Removing file %s' % path)
+ # FIXME "git rm" here would be nice if the file in question is
+ # tracked
+ # FIXME there's a chance that this file is referred to by
+ # another recipe, in which case deleting wouldn't be the
+ # right thing to do
+ os.remove(path)
+ # Remove directory if empty
+ try:
+ os.rmdir(os.path.dirname(path))
+ except OSError as ose:
+ if ose.errno != errno.ENOTEMPTY:
+ raise
+
+
+def _export_patches(srctree, rd, start_rev, destdir):
+ """Export patches from srctree to given location.
+ Returns three-tuple of dicts:
+ 1. updated - patches that already exist in SRC_URI
+ 2. added - new patches that don't exist in SRC_URI
+ 3. removed - patches that exist in SRC_URI but not in exported patches
+ In each dict the key is the 'basepath' of the URI and value is the
+ absolute path to the existing file in recipe space (if any).
+ """
+ import oe.recipeutils
+ from oe.patch import GitApplyTree
+ updated = OrderedDict()
+ added = OrderedDict()
+ seqpatch_re = re.compile('^([0-9]{4}-)?(.+)')
+
+ existing_patches = dict((os.path.basename(path), path) for path in
+ oe.recipeutils.get_recipe_patches(rd))
+
+ # Generate patches from Git, exclude local files directory
+ patch_pathspec = _git_exclude_path(srctree, 'oe-local-files')
+ GitApplyTree.extractPatches(srctree, start_rev, destdir, patch_pathspec)
+
+ new_patches = sorted(os.listdir(destdir))
+ for new_patch in new_patches:
+ # Strip numbering from patch names. If it's a git sequence-named patch,
+ # the numbers might not match up since we are starting from a different
+ # revision. This does assume that people are using unique shortlog
+ # values, but they ought to be anyway...
+ new_basename = seqpatch_re.match(new_patch).group(2)
+ found = False
+ for old_patch in existing_patches:
+ old_basename = seqpatch_re.match(old_patch).group(2)
+ if new_basename == old_basename:
+ updated[new_patch] = existing_patches.pop(old_patch)
+ found = True
+ # Rename patch files
+ if new_patch != old_patch:
+ os.rename(os.path.join(destdir, new_patch),
+ os.path.join(destdir, old_patch))
+ break
+ if not found:
+ added[new_patch] = None
+ return (updated, added, existing_patches)
+
+
+def _create_kconfig_diff(srctree, rd, outfile):
+ """Create a kconfig fragment"""
+ # Only update config fragment if both config files exist
+ orig_config = os.path.join(srctree, '.config.baseline')
+ new_config = os.path.join(srctree, '.config.new')
+ if os.path.exists(orig_config) and os.path.exists(new_config):
+ cmd = ['diff', '--new-line-format=%L', '--old-line-format=',
+ '--unchanged-line-format=', orig_config, new_config]
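+ # With these format options diff prints only the lines added or changed
+ # in .config.new, i.e. the config fragment itself; diff exits 0 when the
+ # files match, 1 when they differ and >1 on error.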
+ pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = pipe.communicate()
+ if pipe.returncode == 1:
+ logger.info("Updating config fragment %s" % outfile)
+ with open(outfile, 'w') as fobj:
+ fobj.write(stdout)
+ elif pipe.returncode == 0:
+ logger.info("Would remove config fragment %s" % outfile)
+ if os.path.exists(outfile):
+ # Remove fragment file in case of empty diff
+ logger.info("Removing config fragment %s" % outfile)
+ os.unlink(outfile)
+ else:
+ raise bb.process.ExecutionError(cmd, pipe.returncode, stdout, stderr)
+ return True
+ return False
+
+
+def _export_local_files(srctree, rd, destdir):
+ """Copy local files from srctree to given location.
+ Returns three-tuple of dicts:
+ 1. updated - files that already exist in SRC_URI
+ 2. added - new files that don't exist in SRC_URI
+ 3. removed - files that exist in SRC_URI but not in exported files
+ In each dict the key is the 'basepath' of the URI and value is the
+ absolute path to the existing file in recipe space (if any).
+ """
+ import oe.recipeutils
+
+ # Find out local files (SRC_URI files that exist in the "recipe space").
+ # Local files that reside in srctree are not included in patch generation.
+ # Instead they are directly copied over the original source files (in
+ # recipe space).
+ existing_files = oe.recipeutils.get_recipe_local_files(rd)
+ new_set = None
+ updated = OrderedDict()
+ added = OrderedDict()
+ removed = OrderedDict()
+ local_files_dir = os.path.join(srctree, 'oe-local-files')
+ git_files = _git_ls_tree(srctree)
+ if 'oe-local-files' in git_files:
+ # If tracked by Git, take the files from srctree HEAD. First get
+ # the tree object of the directory
+ tmp_index = os.path.join(srctree, '.git', 'index.tmp.devtool')
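+ # Check the 'oe-local-files' tree out into destdir via a temporary index
+ # and a redirected work tree, leaving the user's real index and working
+ # copy untouched.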
+ tree = git_files['oe-local-files'][2]
+ bb.process.run(['git', 'checkout', tree, '--', '.'], cwd=srctree,
+ env=dict(os.environ, GIT_WORK_TREE=destdir,
+ GIT_INDEX_FILE=tmp_index))
+ new_set = _git_ls_tree(srctree, tree, True).keys()
+ elif os.path.isdir(local_files_dir):
+ # If not tracked by Git, just copy from working copy
+ new_set = _ls_tree(os.path.join(srctree, 'oe-local-files'))
+ bb.process.run(['cp', '-ax',
+ os.path.join(srctree, 'oe-local-files', '.'), destdir])
+ else:
+ new_set = []
+
+ # Special handling for kernel config
+ if bb.data.inherits_class('kernel-yocto', rd):
+ fragment_fn = 'devtool-fragment.cfg'
+ fragment_path = os.path.join(destdir, fragment_fn)
+ if _create_kconfig_diff(srctree, rd, fragment_path):
+ if os.path.exists(fragment_path):
+ if fragment_fn not in new_set:
+ new_set.append(fragment_fn)
+ # Copy fragment to local-files
+ if os.path.isdir(local_files_dir):
+ shutil.copy2(fragment_path, local_files_dir)
+ else:
+ if fragment_fn in new_set:
+ new_set.remove(fragment_fn)
+ # Remove fragment from local-files
+ if os.path.exists(os.path.join(local_files_dir, fragment_fn)):
+ os.unlink(os.path.join(local_files_dir, fragment_fn))
+
+ if new_set is not None:
+ for fname in new_set:
+ if fname in existing_files:
+ updated[fname] = existing_files.pop(fname)
+ elif fname != '.gitignore':
+ added[fname] = None
+
+ removed = existing_files
+ return (updated, added, removed)
+
+
+def _update_recipe_srcrev(args, srctree, rd, config_data):
+ """Implement the 'srcrev' mode of update-recipe"""
+ import bb
+ import oe.recipeutils
+
+ recipefile = rd.getVar('FILE', True)
+ logger.info('Updating SRCREV in recipe %s' % os.path.basename(recipefile))
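+ # The source tree's HEAD becomes the new SRCREV; deleted local files and
+ # patches whose changes are now part of that revision are dropped from
+ # SRC_URI unless -n/--no-remove is given.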
+
+ # Get HEAD revision
+ try:
+ stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ except bb.process.ExecutionError as err:
+ raise DevtoolError('Failed to get HEAD revision in %s: %s' %
+ (srctree, err))
+ srcrev = stdout.strip()
+ if len(srcrev) != 40:
+ raise DevtoolError('Invalid hash returned by git: %s' % stdout)
+
+ destpath = None
+ remove_files = []
+ patchfields = {}
+ patchfields['SRCREV'] = srcrev
+ orig_src_uri = rd.getVar('SRC_URI', False) or ''
+ srcuri = orig_src_uri.split()
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ update_srcuri = False
+ try:
+ local_files_dir = tempfile.mkdtemp(dir=tempdir)
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir)
+ if not args.no_remove:
+ # Find list of existing patches in recipe file
+ patches_dir = tempfile.mkdtemp(dir=tempdir)
+ old_srcrev = (rd.getVar('SRCREV', False) or '')
+ upd_p, new_p, del_p = _export_patches(srctree, rd, old_srcrev,
+ patches_dir)
+
+ # Remove deleted local files and "overlapping" patches
+ remove_files = del_f.values() + upd_p.values()
+ if remove_files:
+ removedentries = _remove_file_entries(srcuri, remove_files)[0]
+ update_srcuri = True
+
+ if args.append:
+ files = dict((os.path.join(local_files_dir, key), val) for
+ key, val in upd_f.items() + new_f.items())
+ removevalues = {}
+ if update_srcuri:
+ removevalues = {'SRC_URI': removedentries}
+ patchfields['SRC_URI'] = '\\\n '.join(srcuri)
+ _, destpath = oe.recipeutils.bbappend_recipe(
+ rd, args.append, files, wildcardver=args.wildcard_version,
+ extralines=patchfields, removevalues=removevalues)
+ else:
+ files_dir = os.path.join(os.path.dirname(recipefile),
+ rd.getVar('BPN', True))
+ for basepath, path in upd_f.iteritems():
+ logger.info('Updating file %s' % basepath)
+ _move_file(os.path.join(local_files_dir, basepath), path)
+ update_srcuri = True
+ for basepath, path in new_f.iteritems():
+ logger.info('Adding new file %s' % basepath)
+ _move_file(os.path.join(local_files_dir, basepath),
+ os.path.join(files_dir, basepath))
+ srcuri.append('file://%s' % basepath)
+ update_srcuri = True
+ if update_srcuri:
+ patchfields['SRC_URI'] = ' '.join(srcuri)
+ oe.recipeutils.patch_recipe(rd, recipefile, patchfields)
+ finally:
+ shutil.rmtree(tempdir)
+ if not 'git://' in orig_src_uri:
+ logger.info('You will need to update SRC_URI within the recipe to '
+ 'point to a git repository where you have pushed your '
+ 'changes')
+
+ _remove_source_files(args, remove_files, destpath)
+ return True
+
+def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
+ """Implement the 'patch' mode of update-recipe"""
+ import bb
+ import oe.recipeutils
+
+ recipefile = rd.getVar('FILE', True)
+ append = workspace[args.recipename]['bbappend']
+ if not os.path.exists(append):
+ raise DevtoolError('unable to find workspace bbappend for recipe %s' %
+ args.recipename)
+
+ initial_rev, update_rev, changed_revs = _get_patchset_revs(srctree, append, args.initial_rev)
+ if not initial_rev:
+ raise DevtoolError('Unable to find initial revision - please specify '
+ 'it with --initial-rev')
+
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ local_files_dir = tempfile.mkdtemp(dir=tempdir)
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir)
+
+ remove_files = []
+ if not args.no_remove:
+ # Get all patches from source tree and check if any should be removed
+ all_patches_dir = tempfile.mkdtemp(dir=tempdir)
+ upd_p, new_p, del_p = _export_patches(srctree, rd, initial_rev,
+ all_patches_dir)
+ # Remove deleted local files and patches
+ remove_files = del_f.values() + del_p.values()
+
+ # Get updated patches from source tree
+ patches_dir = tempfile.mkdtemp(dir=tempdir)
+ upd_p, new_p, del_p = _export_patches(srctree, rd, update_rev,
+ patches_dir)
+ updatefiles = False
+ updaterecipe = False
+ destpath = None
+ srcuri = (rd.getVar('SRC_URI', False) or '').split()
+ if args.append:
+ files = dict((os.path.join(local_files_dir, key), val) for
+ key, val in upd_f.items() + new_f.items())
+ files.update(dict((os.path.join(patches_dir, key), val) for
+ key, val in upd_p.items() + new_p.items()))
+ if files or remove_files:
+ removevalues = None
+ if remove_files:
+ removedentries, remaining = _remove_file_entries(
+ srcuri, remove_files)
+ if removedentries or remaining:
+ remaining = ['file://' + os.path.basename(item) for
+ item in remaining]
+ removevalues = {'SRC_URI': removedentries + remaining}
+ _, destpath = oe.recipeutils.bbappend_recipe(
+ rd, args.append, files,
+ wildcardver=args.wildcard_version,
+ removevalues=removevalues)
+ else:
+ logger.info('No patches or local source files needed updating')
+ else:
+ # Update existing files
+ for basepath, path in upd_f.iteritems():
+ logger.info('Updating file %s' % basepath)
+ _move_file(os.path.join(local_files_dir, basepath), path)
+ updatefiles = True
+ for basepath, path in upd_p.iteritems():
+ patchfn = os.path.join(patches_dir, basepath)
+ if changed_revs is not None:
+ # Avoid updating patches that have not actually changed
+ with open(patchfn, 'r') as f:
+ firstlineitems = f.readline().split()
+ if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+ if not firstlineitems[1] in changed_revs:
+ continue
+ logger.info('Updating patch %s' % basepath)
+ _move_file(patchfn, path)
+ updatefiles = True
+ # Add any new files
+ files_dir = os.path.join(os.path.dirname(recipefile),
+ rd.getVar('BPN', True))
+ for basepath, path in new_f.iteritems():
+ logger.info('Adding new file %s' % basepath)
+ _move_file(os.path.join(local_files_dir, basepath),
+ os.path.join(files_dir, basepath))
+ srcuri.append('file://%s' % basepath)
+ updaterecipe = True
+ for basepath, path in new_p.iteritems():
+ logger.info('Adding new patch %s' % basepath)
+ _move_file(os.path.join(patches_dir, basepath),
+ os.path.join(files_dir, basepath))
+ srcuri.append('file://%s' % basepath)
+ updaterecipe = True
+ # Update recipe, if needed
+ if _remove_file_entries(srcuri, remove_files)[0]:
+ updaterecipe = True
+ if updaterecipe:
+ logger.info('Updating recipe %s' % os.path.basename(recipefile))
+ oe.recipeutils.patch_recipe(rd, recipefile,
+ {'SRC_URI': ' '.join(srcuri)})
+ elif not updatefiles:
+ # Neither patches nor recipe were updated
+ logger.info('No patches or files need updating')
+ return False
+ finally:
+ shutil.rmtree(tempdir)
+
+ _remove_source_files(args, remove_files, destpath)
+ return True
+
+def _guess_recipe_update_mode(srctree, rdata):
+ """Guess the recipe update mode to use"""
+ src_uri = (rdata.getVar('SRC_URI', False) or '').split()
+ git_uris = [uri for uri in src_uri if uri.startswith('git://')]
+ if not git_uris:
+ return 'patch'
+ # Just use the first URI for now
+ uri = git_uris[0]
+ # Check remote branch
+ params = bb.fetch.decodeurl(uri)[5]
+ upstr_branch = params['branch'] if 'branch' in params else 'master'
+ # Check if current branch HEAD is found in upstream branch
+ stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ head_rev = stdout.rstrip()
+ stdout, _ = bb.process.run('git branch -r --contains %s' % head_rev,
+ cwd=srctree)
+ remote_brs = [branch.strip() for branch in stdout.splitlines()]
+ if 'origin/' + upstr_branch in remote_brs:
+ return 'srcrev'
+
+ return 'patch'
+
+def update_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'update-recipe' subcommand"""
+ check_workspace_recipe(workspace, args.recipename)
+
+ if args.append:
+ if not os.path.exists(args.append):
+ raise DevtoolError('bbappend destination layer directory "%s" '
+ 'does not exist' % args.append)
+ if not os.path.exists(os.path.join(args.append, 'conf', 'layer.conf')):
+ raise DevtoolError('conf/layer.conf not found in bbappend '
+ 'destination layer "%s"' % args.append)
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ srctree = workspace[args.recipename]['srctree']
+ if args.mode == 'auto':
+ mode = _guess_recipe_update_mode(srctree, rd)
+ else:
+ mode = args.mode
+
+ if mode == 'srcrev':
+ updated = _update_recipe_srcrev(args, srctree, rd, tinfoil.config_data)
+ elif mode == 'patch':
+ updated = _update_recipe_patch(args, config, workspace, srctree, rd, tinfoil.config_data)
+ else:
+ raise DevtoolError('update_recipe: invalid mode %s' % mode)
+
+ if updated:
+ rf = rd.getVar('FILE', True)
+ if rf.startswith(config.workspace_path):
+ logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
+
+ return 0
+
+
+def status(args, config, basepath, workspace):
+ """Entry point for the devtool 'status' subcommand"""
+ if workspace:
+ for recipe, value in workspace.iteritems():
+ recipefile = value['recipefile']
+ if recipefile:
+ recipestr = ' (%s)' % recipefile
+ else:
+ recipestr = ''
+ print("%s: %s%s" % (recipe, value['srctree'], recipestr))
+ else:
+ logger.info('No recipes currently in your workspace - you can use "devtool modify" to work on an existing recipe or "devtool add" to add a new one')
+ return 0
+
+
+def reset(args, config, basepath, workspace):
+ """Entry point for the devtool 'reset' subcommand"""
+ import bb
+ if args.recipename:
+ if args.all:
+ raise DevtoolError("Recipe cannot be specified if -a/--all is used")
+ else:
+ check_workspace_recipe(workspace, args.recipename, checksrc=False)
+ elif not args.all:
+ raise DevtoolError("Recipe must be specified, or specify -a/--all to "
+ "reset all recipes")
+ if args.all:
+ recipes = workspace.keys()
+ else:
+ recipes = [args.recipename]
+
+ if recipes and not args.no_clean:
+ if len(recipes) == 1:
+ logger.info('Cleaning sysroot for recipe %s...' % recipes[0])
+ else:
+ logger.info('Cleaning sysroot for recipes %s...' % ', '.join(recipes))
+ # If the recipe file itself was created in the workspace, and
+ # it uses BBCLASSEXTEND, then we need to also clean the other
+ # variants
+ targets = []
+ for recipe in recipes:
+ targets.append(recipe)
+ recipefile = workspace[recipe]['recipefile']
+ if recipefile and os.path.exists(recipefile):
+ targets.extend(get_bbclassextend_targets(recipefile, recipe))
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c clean %s' % ' '.join(targets))
+ except bb.process.ExecutionError as e:
+ raise DevtoolError('Command \'%s\' failed, output:\n%s\nIf you '
+ 'wish, you may specify -n/--no-clean to '
+ 'skip running this command when resetting' %
+ (e.command, e.stdout))
+
+ for pn in recipes:
+ _check_preserve(config, pn)
+
+ preservepath = os.path.join(config.workspace_path, 'attic', pn, pn)
+ def preservedir(origdir):
+ if os.path.exists(origdir):
+ for root, dirs, files in os.walk(origdir):
+ for fn in files:
+ logger.warn('Preserving %s in %s' % (fn, preservepath))
+ _move_file(os.path.join(origdir, fn),
+ os.path.join(preservepath, fn))
+ for dn in dirs:
+ preservedir(os.path.join(root, dn))
+ os.rmdir(origdir)
+
+ preservedir(os.path.join(config.workspace_path, 'recipes', pn))
+ # We don't automatically create this dir next to appends, but the user can
+ preservedir(os.path.join(config.workspace_path, 'appends', pn))
+
+ srctree = workspace[pn]['srctree']
+ if os.path.isdir(srctree):
+ if os.listdir(srctree):
+ # We don't want to risk wiping out any work in progress
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctree)
+ else:
+ # This is unlikely, but if it's empty we can just remove it
+ os.rmdir(srctree)
+
+ return 0
+
+
+def get_default_srctree(config, recipename=''):
+ """Get the default srctree path"""
+ srctreeparent = config.get('General', 'default_source_parent_dir', config.workspace_path)
+ if recipename:
+ return os.path.join(srctreeparent, 'sources', recipename)
+ else:
+ return os.path.join(srctreeparent, 'sources')
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ defsrctree = get_default_srctree(context.config)
+ parser_add = subparsers.add_parser('add', help='Add a new recipe',
+ description='Adds a new recipe to the workspace to build a specified source tree. Can optionally fetch a remote URI and unpack it to create the source tree.',
+ group='starting', order=100)
+ parser_add.add_argument('recipename', nargs='?', help='Name for new recipe to add (just name - no version, path or extension). If not specified, will attempt to auto-detect it.')
+ parser_add.add_argument('srctree', nargs='?', help='Path to external source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
+ parser_add.add_argument('fetchuri', nargs='?', help='Fetch the specified URI and extract it to create the source tree')
+ group = parser_add.add_mutually_exclusive_group()
+ group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
+ group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
+ parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
+ parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
+ parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
+ parser_add.add_argument('--binary', '-b', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure). Useful with binary packages e.g. RPMs.', action='store_true')
+ parser_add.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
+ parser_add.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
+ parser_add.set_defaults(func=add)
+
+ parser_modify = subparsers.add_parser('modify', help='Modify the source for an existing recipe',
+ description='Sets up the build environment to modify the source for an existing recipe. The default behaviour is to extract the source being fetched by the recipe into a git tree so you can work on it; alternatively if you already have your own pre-prepared source tree you can specify -n/--no-extract.',
+ group='starting', order=90)
+ parser_modify.add_argument('recipename', help='Name of existing recipe to edit (just name - no version, path or extension)')
+ parser_modify.add_argument('srctree', nargs='?', help='Path to external source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
+ parser_modify.add_argument('--wildcard', '-w', action="store_true", help='Use wildcard for unversioned bbappend')
+ group = parser_modify.add_mutually_exclusive_group()
+ group.add_argument('--extract', '-x', action="store_true", help='Extract source for recipe (default)')
+ group.add_argument('--no-extract', '-n', action="store_true", help='Do not extract source, expect it to exist')
+ group = parser_modify.add_mutually_exclusive_group()
+ group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
+ group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
+ parser_modify.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (when not using -n/--no-extract) (default "%(default)s")')
+ parser_modify.set_defaults(func=modify)
+
+ parser_extract = subparsers.add_parser('extract', help='Extract the source for an existing recipe',
+ description='Extracts the source for an existing recipe',
+ group='advanced')
+ parser_extract.add_argument('recipename', help='Name of recipe to extract the source for')
+ parser_extract.add_argument('srctree', help='Path to where to extract the source tree')
+ parser_extract.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (default "%(default)s")')
+ parser_extract.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_extract.set_defaults(func=extract, no_workspace=True)
+
+ parser_sync = subparsers.add_parser('sync', help='Synchronize the source tree for an existing recipe',
+ description='Synchronize the previously extracted source tree for an existing recipe',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ group='advanced')
+ parser_sync.add_argument('recipename', help='Name of recipe to sync the source for')
+ parser_sync.add_argument('srctree', help='Path to the source tree')
+ parser_sync.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout')
+ parser_sync.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_sync.set_defaults(func=sync)
+
+ parser_update_recipe = subparsers.add_parser('update-recipe', help='Apply changes from external source tree to recipe',
+ description='Applies changes from external source tree to a recipe (updating/adding/removing patches as necessary, or by updating SRCREV). Note that these changes need to have been committed to the git repository in order to be recognised.',
+ group='working', order=-90)
+ parser_update_recipe.add_argument('recipename', help='Name of recipe to update')
+ parser_update_recipe.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
+ parser_update_recipe.add_argument('--initial-rev', help='Override starting revision for patches')
+ parser_update_recipe.add_argument('--append', '-a', help='Write changes to a bbappend in the specified layer instead of the recipe', metavar='LAYERDIR')
+ parser_update_recipe.add_argument('--wildcard-version', '-w', help='In conjunction with -a/--append, use a wildcard to make the bbappend apply to any recipe version', action='store_true')
+ parser_update_recipe.add_argument('--no-remove', '-n', action="store_true", help='Don\'t remove patches, only add or update')
+ parser_update_recipe.set_defaults(func=update_recipe)
+
+ parser_status = subparsers.add_parser('status', help='Show workspace status',
+ description='Lists recipes currently in your workspace and the paths to their respective external source trees',
+ group='info', order=100)
+ parser_status.set_defaults(func=status)
+
+ parser_reset = subparsers.add_parser('reset', help='Remove a recipe from your workspace',
+ description='Removes the specified recipe from your workspace (resetting its state)',
+ group='working', order=-100)
+ parser_reset.add_argument('recipename', nargs='?', help='Recipe to reset')
+ parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
+ parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_reset.set_defaults(func=reset)
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
new file mode 100644
index 0000000..a085f78
--- /dev/null
+++ b/scripts/lib/devtool/upgrade.py
@@ -0,0 +1,382 @@
+# Development tool - upgrade command plugin
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+"""Devtool upgrade plugin"""
+
+import os
+import sys
+import re
+import shutil
+import tempfile
+import logging
+import argparse
+import scriptutils
+import errno
+import bb
+import oe.recipeutils
+from devtool import standard
+from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build
+
+logger = logging.getLogger('devtool')
+
+def _run(cmd, cwd=''):
+ logger.debug("Running command %s> %s" % (cwd,cmd))
+ return bb.process.run('%s' % cmd, cwd=cwd)
+
+def _get_srctree(tmpdir):
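+    """Return the unpacked source directory; if tmpdir contains a single subdirectory, treat that as the source root"""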
+ srctree = tmpdir
+ dirs = os.listdir(tmpdir)
+ if len(dirs) == 1:
+ srctree = os.path.join(tmpdir, dirs[0])
+ return srctree
+
+def _copy_source_code(orig, dest):
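+    """Move the extracted source files from orig into dest, recreating the directory structure"""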
+ for path in standard._ls_tree(orig):
+ dest_dir = os.path.join(dest, os.path.dirname(path))
+ bb.utils.mkdirhier(dest_dir)
+ dest_path = os.path.join(dest, path)
+ shutil.move(os.path.join(orig, path), dest_path)
+
+def _get_checksums(rf):
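+    """Extract the SRC_URI md5sum/sha256sum values from a recipe file"""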
+ import re
+ checksums = {}
+ with open(rf) as f:
+ for line in f:
+ for cs in ['md5sum', 'sha256sum']:
+ m = re.match("^SRC_URI\[%s\].*=.*\"(.*)\"" % cs, line)
+ if m:
+ checksums[cs] = m.group(1)
+ return checksums
+
+def _remove_patch_dirs(recipefolder):
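+    """Delete all subdirectories (e.g. old patch directories) under the specified recipe folder"""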
+ for root, dirs, files in os.walk(recipefolder):
+ for d in dirs:
+ shutil.rmtree(os.path.join(root,d))
+
+def _recipe_contains(rd, var):
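+    """Check whether the specified variable is set within the recipe's own directory"""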
+ rf = rd.getVar('FILE', True)
+ varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
+ for var, fn in varfiles.iteritems():
+ if fn and fn.startswith(os.path.dirname(rf) + os.sep):
+ return True
+ return False
+
+def _rename_recipe_dirs(oldpv, newpv, path):
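+    """Rename directories under path containing the old version string to use the new version"""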
+ for root, dirs, files in os.walk(path):
+ for olddir in dirs:
+ if olddir.find(oldpv) != -1:
+ newdir = olddir.replace(oldpv, newpv)
+ if olddir != newdir:
+ shutil.move(os.path.join(path, olddir), os.path.join(path, newdir))
+
+def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
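+    """Rename a versioned recipe file for the new version and return its path"""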
+ oldrecipe = os.path.basename(oldrecipe)
+ if oldrecipe.endswith('_%s.bb' % oldpv):
+ newrecipe = '%s_%s.bb' % (bpn, newpv)
+ if oldrecipe != newrecipe:
+ shutil.move(os.path.join(path, oldrecipe), os.path.join(path, newrecipe))
+ else:
+ newrecipe = oldrecipe
+ return os.path.join(path, newrecipe)
+
+def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
+ _rename_recipe_dirs(oldpv, newpv, path)
+ return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
+
+def _write_append(rc, srctree, same_dir, no_same_dir, rev, workspace, d):
+ """Writes an append file"""
+ if not os.path.exists(rc):
+ raise DevtoolError("bbappend not created because %s does not exist" % rc)
+
+ appendpath = os.path.join(workspace, 'appends')
+ if not os.path.exists(appendpath):
+ bb.utils.mkdirhier(appendpath)
+
+ brf = os.path.basename(os.path.splitext(rc)[0]) # rc basename
+
+ srctree = os.path.abspath(srctree)
+ pn = d.getVar('PN',True)
+ af = os.path.join(appendpath, '%s.bbappend' % brf)
+ with open(af, 'w') as f:
+ f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
+ f.write('inherit externalsrc\n')
+        f.write(('# NOTE: We use pn- overrides here to avoid affecting '
+ 'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
+ f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ b_is_s = use_external_build(same_dir, no_same_dir, d)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ if rev:
+ f.write('\n# initial_rev: %s\n' % rev)
+ return af
+
+def _cleanup_on_error(rf, srctree):
+ rfp = os.path.split(rf)[0] # recipe folder
+ rfpp = os.path.split(rfp)[0] # recipes folder
+ if os.path.exists(rfp):
+        shutil.rmtree(rfp)
+ if not len(os.listdir(rfpp)):
+ os.rmdir(rfpp)
+ srctree = os.path.abspath(srctree)
+ if os.path.exists(srctree):
+ shutil.rmtree(srctree)
+
+def _upgrade_error(e, rf, srctree):
+ if rf:
+        _cleanup_on_error(rf, srctree)
+ logger.error(e)
+ raise DevtoolError(e)
+
+def _get_uri(rd):
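+    """Return the first remote SRC_URI entry and its revision (defaulting to ${AUTOREV})"""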
+ srcuris = rd.getVar('SRC_URI', True).split()
+ if not len(srcuris):
+ raise DevtoolError('SRC_URI not found on recipe')
+ # Get first non-local entry in SRC_URI - usually by convention it's
+ # the first entry, but not always!
+ srcuri = None
+ for entry in srcuris:
+ if not entry.startswith('file://'):
+ srcuri = entry
+ break
+ if not srcuri:
+ raise DevtoolError('Unable to find non-local entry in SRC_URI')
+ srcrev = '${AUTOREV}'
+ if '://' in srcuri:
+ # Fetch a URL
+ rev_re = re.compile(';rev=([^;]+)')
+ res = rev_re.search(srcuri)
+ if res:
+ srcrev = res.group(1)
+ srcuri = rev_re.sub('', srcuri)
+ return srcuri, srcrev
+
+def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tinfoil, rd):
+ """Extract sources of a recipe with a new version"""
+
+ def __run(cmd):
+ """Simple wrapper which calls _run with srctree as cwd"""
+ return _run(cmd, srctree)
+
+ crd = rd.createCopy()
+
+ pv = crd.getVar('PV', True)
+ crd.setVar('PV', newpv)
+
+ tmpsrctree = None
+ uri, rev = _get_uri(crd)
+ if srcrev:
+ rev = srcrev
+ if uri.startswith('git://'):
+ __run('git fetch')
+ __run('git checkout %s' % rev)
+ __run('git tag -f devtool-base-new')
+ md5 = None
+ sha256 = None
+ else:
+ __run('git checkout devtool-base -b devtool-%s' % newpv)
+
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ md5, sha256 = scriptutils.fetch_uri(tinfoil.config_data, uri, tmpdir, rev)
+ except bb.fetch2.FetchError as e:
+ raise DevtoolError(e)
+
+ tmpsrctree = _get_srctree(tmpdir)
+ srctree = os.path.abspath(srctree)
+
+ # Delete all sources so we ensure no stray files are left over
+ for item in os.listdir(srctree):
+ if item in ['.git', 'oe-local-files']:
+ continue
+ itempath = os.path.join(srctree, item)
+ if os.path.isdir(itempath):
+ shutil.rmtree(itempath)
+ else:
+ os.remove(itempath)
+
+ # Copy in new ones
+ _copy_source_code(tmpsrctree, srctree)
+
+ (stdout,_) = __run('git ls-files --modified --others --exclude-standard')
+ for f in stdout.splitlines():
+ __run('git add "%s"' % f)
+
+ __run('git commit -q -m "Commit of upstream changes at version %s" --allow-empty' % newpv)
+ __run('git tag -f devtool-base-%s' % newpv)
+
+ (stdout, _) = __run('git rev-parse HEAD')
+ rev = stdout.rstrip()
+
+ if no_patch:
+ patches = oe.recipeutils.get_recipe_patches(crd)
+ if len(patches):
+ logger.warn('By user choice, the following patches will NOT be applied')
+ for patch in patches:
+ logger.warn("%s" % os.path.basename(patch))
+ else:
+ try:
+ __run('git checkout devtool-patched -b %s' % branch)
+ __run('git rebase %s' % rev)
+ if uri.startswith('git://'):
+ suffix = 'new'
+ else:
+ suffix = newpv
+ __run('git tag -f devtool-patched-%s' % suffix)
+ except bb.process.ExecutionError as e:
+ logger.warn('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+
+ if tmpsrctree:
+ if keep_temp:
+ logger.info('Preserving temporary directory %s' % tmpsrctree)
+ else:
+ shutil.rmtree(tmpsrctree)
+
+ return (rev, md5, sha256)
+
+def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, workspace, tinfoil, rd):
+ """Creates the new recipe under workspace"""
+
+ bpn = rd.getVar('BPN', True)
+ path = os.path.join(workspace, 'recipes', bpn)
+ bb.utils.mkdirhier(path)
+ oe.recipeutils.copy_recipe_files(rd, path)
+
+ oldpv = rd.getVar('PV', True)
+ if not newpv:
+ newpv = oldpv
+ origpath = rd.getVar('FILE', True)
+ fullpath = _rename_recipe_files(origpath, bpn, oldpv, newpv, path)
+ logger.debug('Upgraded %s => %s' % (origpath, fullpath))
+
+ newvalues = {}
+ if _recipe_contains(rd, 'PV') and newpv != oldpv:
+ newvalues['PV'] = newpv
+
+ if srcrev:
+ newvalues['SRCREV'] = srcrev
+
+ if srcbranch:
+ src_uri = oe.recipeutils.split_var_value(rd.getVar('SRC_URI', False) or '')
+ changed = False
+ replacing = True
+ new_src_uri = []
+ for entry in src_uri:
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ if replacing and scheme in ['git', 'gitsm']:
+ branch = params.get('branch', 'master')
+ if rd.expand(branch) != srcbranch:
+ # Handle case where branch is set through a variable
+ res = re.match(r'\$\{([^}@]+)\}', branch)
+ if res:
+ newvalues[res.group(1)] = srcbranch
+ # We know we won't change SRC_URI now, so break out
+ break
+ else:
+ params['branch'] = srcbranch
+ entry = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
+ changed = True
+ replacing = False
+ new_src_uri.append(entry)
+ if changed:
+ newvalues['SRC_URI'] = ' '.join(new_src_uri)
+
+ newvalues['PR'] = None
+
+ if md5 and sha256:
+ newvalues['SRC_URI[md5sum]'] = md5
+ newvalues['SRC_URI[sha256sum]'] = sha256
+
+ rd = oe.recipeutils.parse_recipe(fullpath, None, tinfoil.config_data)
+ oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
+
+ return fullpath
+
+def upgrade(args, config, basepath, workspace):
+ """Entry point for the devtool 'upgrade' subcommand"""
+
+ if args.recipename in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" % args.recipename)
+ if not args.version and not args.srcrev:
+ raise DevtoolError("You must provide a version using the --version/-V option, or for recipes that fetch from an SCM such as git, the --srcrev/-S option")
+ if args.srcbranch and not args.srcrev:
+ raise DevtoolError("If you specify --srcbranch/-B then you must use --srcrev/-S to specify the revision" % args.recipename)
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ pn = rd.getVar('PN', True)
+ if pn != args.recipename:
+ logger.info('Mapping %s to %s' % (args.recipename, pn))
+ if pn in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" % pn)
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ else:
+ srctree = standard.get_default_srctree(config, pn)
+
+ standard._check_compatible_recipe(pn, rd)
+ old_srcrev = rd.getVar('SRCREV', True)
+ if old_srcrev == 'INVALID':
+ old_srcrev = None
+ if old_srcrev and not args.srcrev:
+ raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
+ if rd.getVar('PV', True) == args.version and old_srcrev == args.srcrev:
+ raise DevtoolError("Current and upgrade versions are the same version")
+
+ rf = None
+ try:
+ rev1 = standard._extract_source(srctree, False, 'devtool-orig', False, rd)
+ rev2, md5, sha256 = _extract_new_source(args.version, srctree, args.no_patch,
+ args.srcrev, args.branch, args.keep_temp,
+ tinfoil, rd)
+ rf = _create_new_recipe(args.version, md5, sha256, args.srcrev, args.srcbranch, config.workspace_path, tinfoil, rd)
+ except bb.process.CmdError as e:
+ _upgrade_error(e, rf, srctree)
+ except DevtoolError as e:
+ _upgrade_error(e, rf, srctree)
+ standard._add_md5(config, pn, os.path.dirname(rf))
+
+ af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
+ config.workspace_path, rd)
+ standard._add_md5(config, pn, af)
+ logger.info('Upgraded source extracted to %s' % srctree)
+ logger.info('New recipe is %s' % rf)
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ defsrctree = standard.get_default_srctree(context.config)
+
+ parser_upgrade = subparsers.add_parser('upgrade', help='Upgrade an existing recipe',
+ description='Upgrades an existing recipe to a new upstream version. Puts the upgraded recipe file into the workspace along with any associated files, and extracts the source tree to a specified location (in case patches need rebasing or adding to as a result of the upgrade).',
+ group='starting')
+ parser_upgrade.add_argument('recipename', help='Name of recipe to upgrade (just name - no version, path or extension)')
+ parser_upgrade.add_argument('srctree', nargs='?', help='Path to where to extract the source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
+ parser_upgrade.add_argument('--version', '-V', help='Version to upgrade to (PV)')
+ parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (if fetching from an SCM such as git)')
+ parser_upgrade.add_argument('--srcbranch', '-B', help='Branch in source repository containing the revision to use (if fetching from an SCM such as git)')
+ parser_upgrade.add_argument('--branch', '-b', default="devtool", help='Name for new development branch to checkout (default "%(default)s")')
+ parser_upgrade.add_argument('--no-patch', action="store_true", help='Do not apply patches from the recipe to the new source code')
+ group = parser_upgrade.add_mutually_exclusive_group()
+ group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
+ group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
+ parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_upgrade.set_defaults(func=upgrade)
diff --git a/scripts/lib/devtool/utilcmds.py b/scripts/lib/devtool/utilcmds.py
new file mode 100644
index 0000000..b761a80
--- /dev/null
+++ b/scripts/lib/devtool/utilcmds.py
@@ -0,0 +1,233 @@
+# Development tool - utility commands plugin
+#
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool utility plugins"""
+
+import os
+import sys
+import shutil
+import tempfile
+import logging
+import argparse
+import subprocess
+import scriptutils
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import parse_recipe
+
+logger = logging.getLogger('devtool')
+
+
+def edit_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'edit-recipe' subcommand"""
+ if args.any_recipe:
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+ recipefile = rd.getVar('FILE', True)
+ finally:
+ tinfoil.shutdown()
+ else:
+ check_workspace_recipe(workspace, args.recipename)
+ recipefile = workspace[args.recipename]['recipefile']
+ if not recipefile:
+ raise DevtoolError("Recipe file for %s is not under the workspace" %
+ args.recipename)
+
+ return scriptutils.run_editor(recipefile)
+
+
+def configure_help(args, config, basepath, workspace):
+ """Entry point for the devtool 'configure-help' subcommand"""
+ import oe.utils
+
+ check_workspace_recipe(workspace, args.recipename)
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+ b = rd.getVar('B', True)
+ s = rd.getVar('S', True)
+ configurescript = os.path.join(s, 'configure')
+ confdisabled = 'noexec' in rd.getVarFlags('do_configure') or 'do_configure' not in (rd.getVar('__BBTASKS', False) or [])
+ configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS', True) or '')
+ extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF', True) or '')
+ extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE', True) or '')
+ do_configure = rd.getVar('do_configure', True) or ''
+ do_configure_noexpand = rd.getVar('do_configure', False) or ''
+ packageconfig = rd.getVarFlags('PACKAGECONFIG') or []
+ autotools = bb.data.inherits_class('autotools', rd) and ('oe_runconf' in do_configure or 'autotools_do_configure' in do_configure)
+ cmake = bb.data.inherits_class('cmake', rd) and ('cmake_do_configure' in do_configure)
+ cmake_do_configure = rd.getVar('cmake_do_configure', True)
+ pn = rd.getVar('PN', True)
+ finally:
+ tinfoil.shutdown()
+
+ if 'doc' in packageconfig:
+ del packageconfig['doc']
+
+ if autotools and not os.path.exists(configurescript):
+ logger.info('Running do_configure to generate configure script')
+ try:
+ stdout, _ = exec_build_env_command(config.init_path, basepath,
+ 'bitbake -c configure %s' % args.recipename,
+ stderr=subprocess.STDOUT)
+ except bb.process.ExecutionError:
+ pass
+
+ if confdisabled or do_configure.strip() in ('', ':'):
+ raise DevtoolError("do_configure task has been disabled for this recipe")
+ elif args.no_pager and not os.path.exists(configurescript):
+ raise DevtoolError("No configure script found and no other information to display")
+ else:
+ configopttext = ''
+ if autotools and configureopts:
+ configopttext = '''
+Arguments currently passed to the configure script:
+
+%s
+
+Some of those are fixed.''' % (configureopts + ' ' + extra_oeconf)
+ if extra_oeconf:
+ configopttext += ''' The ones that are specified through EXTRA_OECONF (which you can change or add to easily):
+
+%s''' % extra_oeconf
+
+ elif cmake:
+ in_cmake = False
+ cmake_cmd = ''
+ for line in cmake_do_configure.splitlines():
+ if in_cmake:
+ cmake_cmd = cmake_cmd + ' ' + line.strip().rstrip('\\')
+ if not line.endswith('\\'):
+ break
+ if line.lstrip().startswith('cmake '):
+ cmake_cmd = line.strip().rstrip('\\')
+ if line.endswith('\\'):
+ in_cmake = True
+ else:
+ break
+ if cmake_cmd:
+ configopttext = '''
+The current cmake command line:
+
+%s
+
+Arguments specified through EXTRA_OECMAKE (which you can change or add to easily)
+
+%s''' % (oe.utils.squashspaces(cmake_cmd), extra_oecmake)
+ else:
+ configopttext = '''
+The current implementation of cmake_do_configure:
+
+cmake_do_configure() {
+%s
+}
+
+Arguments specified through EXTRA_OECMAKE (which you can change or add to easily)
+
+%s''' % (cmake_do_configure.rstrip(), extra_oecmake)
+
+ elif do_configure:
+ configopttext = '''
+The current implementation of do_configure:
+
+do_configure() {
+%s
+}''' % do_configure.rstrip()
+ if '${EXTRA_OECONF}' in do_configure_noexpand:
+ configopttext += '''
+
+Arguments specified through EXTRA_OECONF (which you can change or add to easily):
+
+%s''' % extra_oeconf
+
+ if packageconfig:
+ configopttext += '''
+
+Some of these options may be controlled through PACKAGECONFIG; for more details please see the recipe.'''
+
+ if args.arg:
+ helpargs = ' '.join(args.arg)
+ elif cmake:
+ helpargs = '-LH'
+ else:
+ helpargs = '--help'
+
+ msg = '''configure information for %s
+------------------------------------------
+%s''' % (pn, configopttext)
+
+ if cmake:
+ msg += '''
+
+The cmake %s output for %s follows. After "-- Cache values" you should see a list of variables you can add to EXTRA_OECMAKE (prefixed with -D and suffixed with = followed by the desired value, without any spaces).
+------------------------------------------''' % (helpargs, pn)
+ elif os.path.exists(configurescript):
+ msg += '''
+
+The ./configure %s output for %s follows.
+------------------------------------------''' % (helpargs, pn)
+
+ olddir = os.getcwd()
+ tmppath = tempfile.mkdtemp()
+ with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+ if not args.no_header:
+ tf.write(msg + '\n')
+ tf.close()
+ try:
+ try:
+ cmd = 'cat %s' % tf.name
+ if cmake:
+ cmd += '; cmake %s %s 2>&1' % (helpargs, s)
+ os.chdir(b)
+ elif os.path.exists(configurescript):
+ cmd += '; %s %s' % (configurescript, helpargs)
+ if sys.stdout.isatty() and not args.no_pager:
+ pager = os.environ.get('PAGER', 'less')
+ cmd = '(%s) | %s' % (cmd, pager)
+ subprocess.check_call(cmd, shell=True)
+ except subprocess.CalledProcessError as e:
+ return e.returncode
+ finally:
+ os.chdir(olddir)
+ shutil.rmtree(tmppath)
+ os.remove(tf.name)
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ parser_edit_recipe = subparsers.add_parser('edit-recipe', help='Edit a recipe file in your workspace',
+ description='Runs the default editor (as specified by the EDITOR variable) on the specified recipe. Note that the recipe file itself must be in the workspace (i.e. as a result of "devtool add" or "devtool upgrade"); you can override this with the -a/--any-recipe option.',
+ group='working')
+ parser_edit_recipe.add_argument('recipename', help='Recipe to edit')
+ parser_edit_recipe.add_argument('--any-recipe', '-a', action="store_true", help='Edit any recipe, not just where the recipe file itself is in the workspace')
+ parser_edit_recipe.set_defaults(func=edit_recipe)
+
+ # NOTE: Needed to override the usage string here since the default
+ # gets the order wrong - recipename must come before --arg
+ parser_configure_help = subparsers.add_parser('configure-help', help='Get help on configure script options',
+ usage='devtool configure-help [options] recipename [--arg ...]',
+ description='Displays the help for the configure script for the specified recipe (i.e. runs ./configure --help) prefaced by a header describing the current options being specified. Output is piped through less (or whatever PAGER is set to, if set) for easy browsing.',
+ group='working')
+ parser_configure_help.add_argument('recipename', help='Recipe to show configure help for')
+ parser_configure_help.add_argument('-p', '--no-pager', help='Disable paged output', action="store_true")
+ parser_configure_help.add_argument('-n', '--no-header', help='Disable explanatory header text', action="store_true")
+ parser_configure_help.add_argument('--arg', help='Pass remaining arguments to the configure script instead of --help (useful if the script has additional help options)', nargs=argparse.REMAINDER)
+ parser_configure_help.set_defaults(func=configure_help)
diff --git a/scripts/lib/recipetool/__init__.py b/scripts/lib/recipetool/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/recipetool/__init__.py
diff --git a/scripts/lib/recipetool/append.py b/scripts/lib/recipetool/append.py
new file mode 100644
index 0000000..558fd25
--- /dev/null
+++ b/scripts/lib/recipetool/append.py
@@ -0,0 +1,471 @@
+# Recipe creation tool - append plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import fnmatch
+import re
+import subprocess
+import logging
+import stat
+import shutil
+import scriptutils
+import errno
+from collections import defaultdict
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+# FIXME guessing when we don't have pkgdata?
+# FIXME mode to create patch rather than directly substitute
+
+class InvalidTargetFileError(Exception):
+ pass
+
+def find_target_file(targetpath, d, pkglist=None):
+ """Find the recipe installing the specified target path, optionally limited to a select list of packages"""
+ import json
+
+ pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+
+ # The mix between /etc and ${sysconfdir} here may look odd, but it is just
+ # being consistent with usage elsewhere
+ invalidtargets = {'${sysconfdir}/version': '${sysconfdir}/version is written out at image creation time',
+ '/etc/timestamp': '/etc/timestamp is written out at image creation time',
+ '/dev/*': '/dev is handled by udev (or equivalent) and the kernel (devtmpfs)',
+ '/etc/passwd': '/etc/passwd should be managed through the useradd and extrausers classes',
+ '/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
+ '/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
+ '/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
+ '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
+
+ for pthspec, message in invalidtargets.iteritems():
+ if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
+ raise InvalidTargetFileError(d.expand(message))
+
+ targetpath_re = re.compile(r'\s+(\$D)?%s(\s|$)' % targetpath)
+
+ recipes = defaultdict(list)
+ for root, dirs, files in os.walk(os.path.join(pkgdata_dir, 'runtime')):
+ if pkglist:
+ filelist = pkglist
+ else:
+ filelist = files
+ for fn in filelist:
+ pkgdatafile = os.path.join(root, fn)
+ if pkglist and not os.path.exists(pkgdatafile):
+ continue
+ with open(pkgdatafile, 'r') as f:
+ pn = ''
+ # This does assume that PN comes before other values, but that's a fairly safe assumption
+ for line in f:
+ if line.startswith('PN:'):
+ pn = line.split(':', 1)[1].strip()
+ elif line.startswith('FILES_INFO:'):
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ for fullpth in dictval.keys():
+ if fnmatch.fnmatchcase(fullpth, targetpath):
+ recipes[targetpath].append(pn)
+ elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
+ scriptval = line.split(':', 1)[1].strip().decode('string_escape')
+ if 'update-alternatives --install %s ' % targetpath in scriptval:
+ recipes[targetpath].append('?%s' % pn)
+ elif targetpath_re.search(scriptval):
+ recipes[targetpath].append('!%s' % pn)
+ return recipes
+
+def _get_recipe_file(cooker, pn):
+ import oe.recipeutils
+ recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
+ if not recipefile:
+ skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
+ if skipreasons:
+ logger.error('\n'.join(skipreasons))
+ else:
+ logger.error("Unable to find any recipe file matching %s" % pn)
+ return recipefile
+
+def _parse_recipe(pn, tinfoil):
+ import oe.recipeutils
+ recipefile = _get_recipe_file(tinfoil.cooker, pn)
+ if not recipefile:
+ # Error already logged
+ return None
+ append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
+ rd = oe.recipeutils.parse_recipe(recipefile, append_files,
+ tinfoil.config_data)
+ return rd
+
+def determine_file_source(targetpath, rd):
+ """Assuming we know a file came from a specific recipe, figure out exactly where it came from"""
+ import oe.recipeutils
+
+ # See if it's in do_install for the recipe
+ workdir = rd.getVar('WORKDIR', True)
+ src_uri = rd.getVar('SRC_URI', True)
+ srcfile = ''
+ modpatches = []
+ elements = check_do_install(rd, targetpath)
+ if elements:
+ logger.debug('do_install line:\n%s' % ' '.join(elements))
+ srcpath = get_source_path(elements)
+ logger.debug('source path: %s' % srcpath)
+ if not srcpath.startswith('/'):
+ # Handle non-absolute path
+ srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs', True).split()[-1], srcpath))
+ if srcpath.startswith(workdir):
+ # OK, now we have the source file name, look for it in SRC_URI
+ workdirfile = os.path.relpath(srcpath, workdir)
+ # FIXME this is where we ought to have some code in the fetcher, because this is naive
+ for item in src_uri.split():
+ localpath = bb.fetch2.localpath(item, rd)
+ # Source path specified in do_install might be a glob
+ if fnmatch.fnmatch(os.path.basename(localpath), workdirfile):
+ srcfile = 'file://%s' % localpath
+ elif '/' in workdirfile:
+ if item == 'file://%s' % workdirfile:
+ srcfile = 'file://%s' % localpath
+
+ # Check patches
+ srcpatches = []
+ patchedfiles = oe.recipeutils.get_recipe_patched_files(rd)
+ for patch, filelist in patchedfiles.iteritems():
+ for fileitem in filelist:
+ if fileitem[0] == srcpath:
+ srcpatches.append((patch, fileitem[1]))
+ if srcpatches:
+ addpatch = None
+ for patch in srcpatches:
+ if patch[1] == 'A':
+ addpatch = patch[0]
+ else:
+ modpatches.append(patch[0])
+ if addpatch:
+ srcfile = 'patch://%s' % addpatch
+
+ return (srcfile, elements, modpatches)
+
+def get_source_path(cmdelements):
+ """Find the source path specified within a command"""
+ command = cmdelements[0]
+ if command in ['install', 'cp']:
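+        # Determine which single-letter options take an argument (by parsing the command's
+        # --help output) so their values can be skipped when scanning for the source path below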
+ helptext = subprocess.check_output('LC_ALL=C %s --help' % command, shell=True)
+ argopts = ''
+ argopt_line_re = re.compile('^-([a-zA-Z0-9]), --[a-z-]+=')
+ for line in helptext.splitlines():
+ line = line.lstrip()
+ res = argopt_line_re.search(line)
+ if res:
+ argopts += res.group(1)
+ if not argopts:
+ # Fallback
+ if command == 'install':
+ argopts = 'gmoSt'
+ elif command == 'cp':
+ argopts = 't'
+ else:
+ raise Exception('No fallback arguments for command %s' % command)
+
+ skipnext = False
+ for elem in cmdelements[1:-1]:
+ if elem.startswith('-'):
+ if len(elem) > 1 and elem[1] in argopts:
+ skipnext = True
+ continue
+ if skipnext:
+ skipnext = False
+ continue
+ return elem
+ else:
+        raise Exception('get_source_path: no handling for command "%s"' % command)
+
+def get_func_deps(func, d):
+ """Find the function dependencies of a shell function"""
+ deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
+ deps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
+ funcdeps = []
+ for dep in deps:
+ if d.getVarFlag(dep, 'func', True):
+ funcdeps.append(dep)
+ return funcdeps
+
+def check_do_install(rd, targetpath):
+ """Look at do_install for a command that installs/copies the specified target path"""
+ instpath = os.path.abspath(os.path.join(rd.getVar('D', True), targetpath.lstrip('/')))
+ do_install = rd.getVar('do_install', True)
+ # Handle where do_install calls other functions (somewhat crudely, but good enough for this purpose)
+ deps = get_func_deps('do_install', rd)
+ for dep in deps:
+ do_install = do_install.replace(dep, rd.getVar(dep, True))
+
+ # Look backwards through do_install as we want to catch where a later line (perhaps
+ # from a bbappend) is writing over the top
+ for line in reversed(do_install.splitlines()):
+ line = line.strip()
+ if (line.startswith('install ') and ' -m' in line) or line.startswith('cp '):
+ elements = line.split()
+ destpath = os.path.abspath(elements[-1])
+ if destpath == instpath:
+ return elements
+ elif destpath.rstrip('/') == os.path.dirname(instpath):
+ # FIXME this doesn't take recursive copy into account; unsure if it's practical to do so
+ srcpath = get_source_path(elements)
+ if fnmatch.fnmatchcase(os.path.basename(instpath), os.path.basename(srcpath)):
+ return elements
+ return None
+
+
+def appendfile(args):
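+    """Entry point for the 'recipetool appendfile' subcommand"""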
+ import oe.recipeutils
+
+ stdout = ''
+ try:
+ (stdout, _) = bb.process.run('LANG=C file -b %s' % args.newfile, shell=True)
+ if 'cannot open' in stdout:
+ raise bb.process.ExecutionError(stdout)
+ except bb.process.ExecutionError as err:
+ logger.debug('file command returned error: %s' % err)
+ stdout = ''
+ if stdout:
+ logger.debug('file command output: %s' % stdout.rstrip())
+ if ('executable' in stdout and not 'shell script' in stdout) or 'shared object' in stdout:
+ logger.warn('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
+
+ if args.recipe:
+ recipes = {args.targetpath: [args.recipe],}
+ else:
+ try:
+ recipes = find_target_file(args.targetpath, tinfoil.config_data)
+ except InvalidTargetFileError as e:
+ logger.error('%s cannot be handled by this tool: %s' % (args.targetpath, e))
+ return 1
+ if not recipes:
+ logger.error('Unable to find any package producing path %s - this may be because the recipe packaging it has not been built yet' % args.targetpath)
+ return 1
+
+ alternative_pns = []
+ postinst_pns = []
+
+ selectpn = None
+ for targetpath, pnlist in recipes.iteritems():
+ for pn in pnlist:
+ if pn.startswith('?'):
+ alternative_pns.append(pn[1:])
+ elif pn.startswith('!'):
+ postinst_pns.append(pn[1:])
+ elif selectpn:
+ # hit here with multilibs
+ continue
+ else:
+ selectpn = pn
+
+ if not selectpn and len(alternative_pns) == 1:
+ selectpn = alternative_pns[0]
+        logger.error('File %s is an alternative possibly provided by recipe %s and seemingly by no other recipe, selecting it by default - you should double check other recipes' % (args.targetpath, selectpn))
+
+ if selectpn:
+ logger.debug('Selecting recipe %s for file %s' % (selectpn, args.targetpath))
+ if postinst_pns:
+            logger.warn('%s may be modified by postinstall scripts for the following recipes:\n  %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n  '.join(postinst_pns)))
+ rd = _parse_recipe(selectpn, tinfoil)
+ if not rd:
+ # Error message already shown
+ return 1
+ sourcefile, instelements, modpatches = determine_file_source(args.targetpath, rd)
+ sourcepath = None
+ if sourcefile:
+ sourcetype, sourcepath = sourcefile.split('://', 1)
+ logger.debug('Original source file is %s (%s)' % (sourcepath, sourcetype))
+ if sourcetype == 'patch':
+ logger.warn('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
+ sourcepath = None
+ else:
+ logger.debug('Unable to determine source file, proceeding anyway')
+ if modpatches:
+ logger.warn('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
+
+ if instelements and sourcepath:
+ install = None
+ else:
+ # Auto-determine permissions
+ # Check destination
+ binpaths = '${bindir}:${sbindir}:${base_bindir}:${base_sbindir}:${libexecdir}:${sysconfdir}/init.d'
+ perms = '0644'
+ if os.path.abspath(os.path.dirname(args.targetpath)) in rd.expand(binpaths).split(':'):
+ # File is going into a directory normally reserved for executables, so it should be executable
+ perms = '0755'
+ else:
+ # Check source
+ st = os.stat(args.newfile)
+ if st.st_mode & stat.S_IXUSR:
+ perms = '0755'
+            install = {args.newfile: (args.targetpath, perms)}
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: sourcepath}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ return 0
+ else:
+ if alternative_pns:
+ logger.error('File %s is an alternative possibly provided by the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(alternative_pns)))
+ elif postinst_pns:
+ logger.error('File %s may be written out in a pre/postinstall script of the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(postinst_pns)))
+ return 3
+
+
+def appendsrc(args, files, rd, extralines=None):
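+    """Shared implementation for appendsrcfile(s): add the specified source files to the recipe via a bbappend"""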
+ import oe.recipeutils
+
+ srcdir = rd.getVar('S', True)
+ workdir = rd.getVar('WORKDIR', True)
+
+ import bb.fetch
+ simplified = {}
+ src_uri = rd.getVar('SRC_URI', True).split()
+ for uri in src_uri:
+ if uri.endswith(';'):
+ uri = uri[:-1]
+ simple_uri = bb.fetch.URI(uri)
+ simple_uri.params = {}
+ simplified[str(simple_uri)] = uri
+
+ copyfiles = {}
+ extralines = extralines or []
+ for newfile, srcfile in files.iteritems():
+ src_destdir = os.path.dirname(srcfile)
+ if not args.use_workdir:
+ if rd.getVar('S', True) == rd.getVar('STAGING_KERNEL_DIR', True):
+ srcdir = os.path.join(workdir, 'git')
+ if not bb.data.inherits_class('kernel-yocto', rd):
+ logger.warn('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
+ src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
+ src_destdir = os.path.normpath(src_destdir)
+
+ source_uri = 'file://{0}'.format(os.path.basename(srcfile))
+ if src_destdir and src_destdir != '.':
+ source_uri += ';subdir={0}'.format(src_destdir)
+
+ simple = bb.fetch.URI(source_uri)
+ simple.params = {}
+ simple_str = str(simple)
+ if simple_str in simplified:
+ existing = simplified[simple_str]
+ if source_uri != existing:
+ logger.warn('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
+ else:
+ logger.warn('{0!r} is already in SRC_URI, not adding'.format(source_uri))
+ else:
+ extralines.append('SRC_URI += {0}'.format(source_uri))
+ copyfiles[newfile] = srcfile
+
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines)
+
+
+def appendsrcfiles(parser, args):
+ recipedata = _parse_recipe(args.recipe, tinfoil)
+ if not recipedata:
+ parser.error('RECIPE must be a valid recipe name')
+
+ files = dict((f, os.path.join(args.destdir, os.path.basename(f)))
+ for f in args.files)
+ return appendsrc(args, files, recipedata)
+
+
+def appendsrcfile(parser, args):
+ recipedata = _parse_recipe(args.recipe, tinfoil)
+ if not recipedata:
+ parser.error('RECIPE must be a valid recipe name')
+
+ if not args.destfile:
+ args.destfile = os.path.basename(args.file)
+ elif args.destfile.endswith('/'):
+ args.destfile = os.path.join(args.destfile, os.path.basename(args.file))
+
+ return appendsrc(args, {args.file: args.destfile}, recipedata)
+
+
+def layer(layerpath):
+ if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
+ raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
+ return layerpath
+
+
+def existing_path(filepath):
+ if not os.path.exists(filepath):
+ raise argparse.ArgumentTypeError('{0!r} must be an existing path'.format(filepath))
+ return filepath
+
+
+def existing_file(filepath):
+ filepath = existing_path(filepath)
+ if os.path.isdir(filepath):
+ raise argparse.ArgumentTypeError('{0!r} must be a file, not a directory'.format(filepath))
+ return filepath
+
+
+def destination_path(destpath):
+ if os.path.isabs(destpath):
+ raise argparse.ArgumentTypeError('{0!r} must be a relative path, not absolute'.format(destpath))
+ return destpath
+
+
+def target_path(targetpath):
+ if not os.path.isabs(targetpath):
+ raise argparse.ArgumentTypeError('{0!r} must be an absolute path, not relative'.format(targetpath))
+ return targetpath
+
+
+def register_commands(subparsers):
+ common = argparse.ArgumentParser(add_help=False)
+ common.add_argument('-m', '--machine', help='Make bbappend changes specific to a machine only', metavar='MACHINE')
+ common.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
+ common.add_argument('destlayer', metavar='DESTLAYER', help='Base directory of the destination layer to write the bbappend to', type=layer)
+
+ parser_appendfile = subparsers.add_parser('appendfile',
+ parents=[common],
+ help='Create/update a bbappend to replace a target file',
+ description='Creates a bbappend (or updates an existing one) to replace the specified file that appears in the target system, determining the recipe that packages the file and the required path and name for the bbappend automatically. Note that the ability to determine the recipe packaging a particular file depends upon the recipe\'s do_packagedata task having already run prior to running this command (which it will have when the recipe has been built successfully, which in turn will have happened if one or more of the recipe\'s packages is included in an image that has been built successfully).')
+ parser_appendfile.add_argument('targetpath', help='Path to the file to be replaced (as it would appear within the target image, e.g. /etc/motd)', type=target_path)
+ parser_appendfile.add_argument('newfile', help='Custom file to replace the target file with', type=existing_file)
+ parser_appendfile.add_argument('-r', '--recipe', help='Override recipe to apply to (default is to find which recipe already packages the file)')
+ parser_appendfile.set_defaults(func=appendfile, parserecipes=True)
+
+ common_src = argparse.ArgumentParser(add_help=False, parents=[common])
+ common_src.add_argument('-W', '--workdir', help='Unpack file into WORKDIR rather than S', dest='use_workdir', action='store_true')
+ common_src.add_argument('recipe', metavar='RECIPE', help='Override recipe to apply to')
+
+ parser = subparsers.add_parser('appendsrcfiles',
+ parents=[common_src],
+ help='Create/update a bbappend to add or replace source files',
+                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so you cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
+ parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
+ parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
+ parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)
+
+ parser = subparsers.add_parser('appendsrcfile',
+ parents=[common_src],
+ help='Create/update a bbappend to add or replace a source file',
+                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
+ parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
+ parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
+ parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
new file mode 100644
index 0000000..1f85fcf
--- /dev/null
+++ b/scripts/lib/recipetool/create.py
@@ -0,0 +1,963 @@
+# Recipe creation tool - create command plugin
+#
+# Copyright (C) 2014-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import fnmatch
+import re
+import json
+import logging
+import scriptutils
+import urlparse
+import hashlib
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+plugins = None
+
+def plugin_init(pluginlist):
+ # Take a reference to the list so we can use it later
+ global plugins
+ plugins = pluginlist
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+class RecipeHandler(object):
+ recipelibmap = {}
+ recipeheadermap = {}
+ recipecmakefilemap = {}
+ recipebinmap = {}
+
+ @staticmethod
+ def load_libmap(d):
+ '''Load library->recipe mapping'''
+ import oe.package
+
+ if RecipeHandler.recipelibmap:
+ return
+ # First build up library->package mapping
+ shlib_providers = oe.package.read_shlib_providers(d)
+ libdir = d.getVar('libdir', True)
+ base_libdir = d.getVar('base_libdir', True)
+ libpaths = list(set([base_libdir, libdir]))
+ libname_re = re.compile('^lib(.+)\.so.*$')
+ pkglibmap = {}
+ for lib, item in shlib_providers.iteritems():
+ for path, pkg in item.iteritems():
+ if path in libpaths:
+ res = libname_re.match(lib)
+ if res:
+ libname = res.group(1)
+ if not libname in pkglibmap:
+ pkglibmap[libname] = pkg[0]
+ else:
+ logger.debug('unable to extract library name from %s' % lib)
+
+ # Now turn it into a library->recipe mapping
+ pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ for libname, pkg in pkglibmap.iteritems():
+ try:
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
+ for line in f:
+ if line.startswith('PN:'):
+ RecipeHandler.recipelibmap[libname] = line.split(':', 1)[-1].strip()
+ break
+ except IOError as ioe:
+ if ioe.errno == 2:
+ logger.warn('unable to find a pkgdata file for package %s' % pkg)
+ else:
+ raise
+
+ # Some overrides - these should be mapped to the virtual
+ RecipeHandler.recipelibmap['GL'] = 'virtual/libgl'
+ RecipeHandler.recipelibmap['EGL'] = 'virtual/egl'
+ RecipeHandler.recipelibmap['GLESv2'] = 'virtual/libgles2'
+
+ @staticmethod
+ def load_devel_filemap(d):
+ '''Build up development file->recipe mapping'''
+ if RecipeHandler.recipeheadermap:
+ return
+ pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+ includedir = d.getVar('includedir', True)
+ cmakedir = os.path.join(d.getVar('libdir', True), 'cmake')
+ for pkg in glob.glob(os.path.join(pkgdata_dir, 'runtime', '*-dev')):
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
+ pn = None
+ headers = []
+ cmakefiles = []
+ for line in f:
+ if line.startswith('PN:'):
+ pn = line.split(':', 1)[-1].strip()
+ elif line.startswith('FILES_INFO:'):
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ for fullpth in sorted(dictval):
+ if fullpth.startswith(includedir) and fullpth.endswith('.h'):
+ headers.append(os.path.relpath(fullpth, includedir))
+ elif fullpth.startswith(cmakedir) and fullpth.endswith('.cmake'):
+ cmakefiles.append(os.path.relpath(fullpth, cmakedir))
+ if pn and headers:
+ for header in headers:
+ RecipeHandler.recipeheadermap[header] = pn
+ if pn and cmakefiles:
+ for fn in cmakefiles:
+ RecipeHandler.recipecmakefilemap[fn] = pn
+
+ @staticmethod
+ def load_binmap(d):
+ '''Build up native binary->recipe mapping'''
+ if RecipeHandler.recipebinmap:
+ return
+ sstate_manifests = d.getVar('SSTATE_MANIFESTS', True)
+ staging_bindir_native = d.getVar('STAGING_BINDIR_NATIVE', True)
+ build_arch = d.getVar('BUILD_ARCH', True)
+ fileprefix = 'manifest-%s-' % build_arch
+ for fn in glob.glob(os.path.join(sstate_manifests, '%s*-native.populate_sysroot' % fileprefix)):
+ with open(fn, 'r') as f:
+ pn = os.path.basename(fn).rsplit('.', 1)[0][len(fileprefix):]
+ for line in f:
+ if line.startswith(staging_bindir_native):
+ prog = os.path.basename(line.rstrip())
+ RecipeHandler.recipebinmap[prog] = pn
+
+ @staticmethod
+ def checkfiles(path, speclist, recursive=False):
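+        '''Return files under path matching any of the given glob patterns (optionally searching recursively)'''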
+ results = []
+ if recursive:
+ for root, _, files in os.walk(path):
+ for fn in files:
+ for spec in speclist:
+ if fnmatch.fnmatch(fn, spec):
+ results.append(os.path.join(root, fn))
+ else:
+ for spec in speclist:
+ results.extend(glob.glob(os.path.join(path, spec)))
+ return results
+
+ @staticmethod
+ def handle_depends(libdeps, pcdeps, deps, outlines, values, d):
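+        '''Map discovered library/pkg-config dependencies onto recipes and record them in DEPENDS'''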
+ if pcdeps:
+ recipemap = read_pkgconfig_provides(d)
+ if libdeps:
+ RecipeHandler.load_libmap(d)
+
+ ignorelibs = ['socket']
+ ignoredeps = ['gcc-runtime', 'glibc', 'uclibc', 'musl', 'tar-native', 'binutils-native', 'coreutils-native']
+
+ unmappedpc = []
+ pcdeps = list(set(pcdeps))
+ for pcdep in pcdeps:
+ if isinstance(pcdep, basestring):
+ recipe = recipemap.get(pcdep, None)
+ if recipe:
+ deps.append(recipe)
+ else:
+ if not pcdep.startswith('$'):
+ unmappedpc.append(pcdep)
+ else:
+ for item in pcdep:
+                    recipe = recipemap.get(item, None)
+ if recipe:
+ deps.append(recipe)
+ break
+ else:
+ unmappedpc.append('(%s)' % ' or '.join(pcdep))
+
+ unmappedlibs = []
+ for libdep in libdeps:
+ if isinstance(libdep, tuple):
+ lib, header = libdep
+ else:
+ lib = libdep
+ header = None
+
+ if lib in ignorelibs:
+ logger.debug('Ignoring library dependency %s' % lib)
+ continue
+
+ recipe = RecipeHandler.recipelibmap.get(lib, None)
+ if recipe:
+ deps.append(recipe)
+ elif recipe is None:
+ if header:
+ RecipeHandler.load_devel_filemap(d)
+ recipe = RecipeHandler.recipeheadermap.get(header, None)
+ if recipe:
+ deps.append(recipe)
+ elif recipe is None:
+ unmappedlibs.append(lib)
+ else:
+ unmappedlibs.append(lib)
+
+ deps = set(deps).difference(set(ignoredeps))
+
+ if unmappedpc:
+ outlines.append('# NOTE: unable to map the following pkg-config dependencies: %s' % ' '.join(unmappedpc))
+ outlines.append('# (this is based on recipes that have previously been built and packaged)')
+
+ if unmappedlibs:
+ outlines.append('# NOTE: the following library dependencies are unknown, ignoring: %s' % ' '.join(list(set(unmappedlibs))))
+ outlines.append('# (this is based on recipes that have previously been built and packaged)')
+
+ if deps:
+ values['DEPENDS'] = ' '.join(deps)
+
+ def genfunction(self, outlines, funcname, content, python=False, forcespace=False):
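+        '''Write a function definition (shell or python) into the generated recipe lines'''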
+ if python:
+ prefix = 'python '
+ else:
+ prefix = ''
+ outlines.append('%s%s () {' % (prefix, funcname))
+ if python or forcespace:
+ indent = ' '
+ else:
+ indent = '\t'
+ addnoop = not python
+ for line in content:
+ outlines.append('%s%s' % (indent, line))
+ if addnoop:
+ strippedline = line.lstrip()
+ if strippedline and not strippedline.startswith('#'):
+ addnoop = False
+ if addnoop:
+ # Without this there'll be a syntax error
+ outlines.append('%s:' % indent)
+ outlines.append('}')
+ outlines.append('')
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ return False
+
+
+def validate_pv(pv):
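+    """Check whether a version string looks like a sensible PV value"""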
+ if not pv or '_version' in pv.lower() or pv[0] not in '0123456789':
+ return False
+ return True
+
+def determine_from_filename(srcfile):
+ """Determine name and version from a filename"""
+ part = ''
+ if '.tar.' in srcfile:
+ namepart = srcfile.split('.tar.')[0].lower()
+ else:
+ namepart = os.path.splitext(srcfile)[0].lower()
+ splitval = namepart.rsplit('_', 1)
+ if len(splitval) == 1:
+ splitval = namepart.rsplit('-', 1)
+ pn = splitval[0].replace('_', '-')
+ if len(splitval) > 1:
+ if splitval[1][0] in '0123456789':
+ pv = splitval[1]
+ else:
+ pn = '-'.join(splitval).replace('_', '-')
+ pv = None
+ else:
+ pv = None
+ return (pn, pv)
+
+def determine_from_url(srcuri):
+ """Determine name and version from a URL"""
+ pn = None
+ pv = None
+ parseres = urlparse.urlparse(srcuri.lower().split(';', 1)[0])
+ if parseres.path:
+ if 'github.com' in parseres.netloc:
+ res = re.search(r'.*/(.*?)/archive/(.*)-final\.(tar|zip)', parseres.path)
+ if res:
+ pn = res.group(1).strip().replace('_', '-')
+ pv = res.group(2).strip().replace('_', '.')
+ else:
+ res = re.search(r'.*/(.*?)/archive/v?(.*)\.(tar|zip)', parseres.path)
+ if res:
+ pn = res.group(1).strip().replace('_', '-')
+ pv = res.group(2).strip().replace('_', '.')
+ elif 'bitbucket.org' in parseres.netloc:
+ res = re.search(r'.*/(.*?)/get/[a-zA-Z_-]*([0-9][0-9a-zA-Z_.]*)\.(tar|zip)', parseres.path)
+ if res:
+ pn = res.group(1).strip().replace('_', '-')
+ pv = res.group(2).strip().replace('_', '.')
+
+ if not pn and not pv:
+ srcfile = os.path.basename(parseres.path.rstrip('/'))
+ pn, pv = determine_from_filename(srcfile)
+
+ logger.debug('Determined from source URL: name = "%s", version = "%s"' % (pn, pv))
+ return (pn, pv)
+
+def supports_srcrev(uri):
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ # This is a bit sad, but if you don't have this set there can be some
+ # odd interactions with the urldata cache which lead to errors
+ localdata.setVar('SRCREV', '${AUTOREV}')
+ bb.data.update_data(localdata)
+ fetcher = bb.fetch2.Fetch([uri], localdata)
+ urldata = fetcher.ud
+ for u in urldata:
+ if urldata[u].method.supports_srcrev():
+ return True
+ return False
+
+def reformat_git_uri(uri):
+ '''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
+ checkuri = uri.split(';', 1)[0]
+ if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
+ res = re.match('(https?)://([^;]+(\.git)?)(;.*)?$', uri)
+ if res:
+ # Need to switch the URI around so that the git fetcher is used
+ return 'git://%s;protocol=%s%s' % (res.group(2), res.group(1), res.group(4) or '')
+ return uri
+
+def create_recipe(args):
+    import bb.process
+    import tempfile
+    import shutil
+
+    pkgarch = ""
+    if args.machine:
+        pkgarch = "${MACHINE_ARCH}"
+
+    checksums = (None, None)
+    tempsrc = ''
+    srcsubdir = ''
+    srcrev = '${AUTOREV}'
+    if '://' in args.source:
+        # Fetch a URL
+        fetchuri = reformat_git_uri(urlparse.urldefrag(args.source)[0])
+        if args.binary:
+            # Assume the archive contains the directory structure verbatim
+            # so we need to extract to a subdirectory
+            fetchuri += ';subdir=%s' % os.path.splitext(os.path.basename(urlparse.urlsplit(fetchuri).path))[0]
+        srcuri = fetchuri
+        rev_re = re.compile(';rev=([^;]+)')
+        res = rev_re.search(srcuri)
+        if res:
+            srcrev = res.group(1)
+            srcuri = rev_re.sub('', srcuri)
+        tempsrc = tempfile.mkdtemp(prefix='recipetool-')
+        srctree = tempsrc
+        if fetchuri.startswith('npm://'):
+            # Check if npm is available
+            npm = bb.utils.which(tinfoil.config_data.getVar('PATH', True), 'npm')
+            if not npm:
+                logger.error('npm:// URL requested but npm is not available - you need to either build nodejs-native or install npm using your package manager')
+                sys.exit(1)
+        logger.info('Fetching %s...' % srcuri)
+        try:
+            checksums = scriptutils.fetch_uri(tinfoil.config_data, fetchuri, srctree, srcrev)
+        except bb.fetch2.BBFetchException as e:
+            logger.error(str(e).rstrip())
+            sys.exit(1)
+        dirlist = os.listdir(srctree)
+        if 'git.indirectionsymlink' in dirlist:
+            dirlist.remove('git.indirectionsymlink')
+        if len(dirlist) == 1:
+            singleitem = os.path.join(srctree, dirlist[0])
+            if os.path.isdir(singleitem):
+                # We unpacked a single directory, so we should use that
+                srcsubdir = dirlist[0]
+                srctree = os.path.join(srctree, srcsubdir)
+            else:
+                with open(singleitem, 'r') as f:
+                    if '<html' in f.read(100).lower():
+                        logger.error('Fetching "%s" returned a single HTML page - check the URL is correct and functional' % fetchuri)
+                        sys.exit(1)
+    else:
+        # Assume we're pointing to an existing source tree
+        if args.extract_to:
+            logger.error('--extract-to cannot be specified if source is a directory')
+            sys.exit(1)
+        if not os.path.isdir(args.source):
+            logger.error('Invalid source directory %s' % args.source)
+            sys.exit(1)
+        srctree = args.source
+        srcuri = ''
+        if os.path.exists(os.path.join(srctree, '.git')):
+            # Try to get upstream repo location from origin remote
+            try:
+                stdout, _ = bb.process.run('git remote -v', cwd=srctree, shell=True)
+            except bb.process.ExecutionError as e:
+                stdout = None
+            if stdout:
+                for line in stdout.splitlines():
+                    splitline = line.split()
+                    if len(splitline) > 1:
+                        if splitline[0] == 'origin' and '://' in splitline[1]:
+                            srcuri = reformat_git_uri(splitline[1])
+                            srcsubdir = 'git'
+                            break
+
+    if args.src_subdir:
+        srcsubdir = os.path.join(srcsubdir, args.src_subdir)
+        srctree_use = os.path.join(srctree, args.src_subdir)
+    else:
+        srctree_use = srctree
+
+    if args.outfile and os.path.isdir(args.outfile):
+        outfile = None
+        outdir = args.outfile
+    else:
+        outfile = args.outfile
+        outdir = None
+    if outfile and outfile != '-':
+        if os.path.exists(outfile):
+            logger.error('Output file %s already exists' % outfile)
+            sys.exit(1)
+
+    lines_before = []
+    lines_after = []
+
+    lines_before.append('# Recipe created by %s' % os.path.basename(sys.argv[0]))
+    lines_before.append('# This is the basis of a recipe and may need further editing in order to be fully functional.')
+    lines_before.append('# (Feel free to remove these comments when editing.)')
+    lines_before.append('#')
+
+    licvalues = guess_license(srctree_use)
+    lic_files_chksum = []
+    if licvalues:
+        licenses = []
+        for licvalue in licvalues:
+            if not licvalue[0] in licenses:
+                licenses.append(licvalue[0])
+            lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
+        lines_before.append('# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is')
+        lines_before.append('# your responsibility to verify that the values are complete and correct.')
+        if len(licvalues) > 1:
+            lines_before.append('#')
+            lines_before.append('# NOTE: multiple licenses have been detected; if that is correct you should separate')
+            lines_before.append('# these in the LICENSE value using & if the multiple licenses all apply, or | if there')
+            lines_before.append('# is a choice between the multiple licenses. If in doubt, check the accompanying')
+            lines_before.append('# documentation to determine which situation is applicable.')
+    else:
+        lines_before.append('# Unable to find any files that looked like license statements. Check the accompanying')
+        lines_before.append('# documentation and source headers and set LICENSE and LIC_FILES_CHKSUM accordingly.')
+        lines_before.append('#')
+        lines_before.append('# NOTE: LICENSE is being set to "CLOSED" to allow you to at least start building - if')
+        lines_before.append('# this is not accurate with respect to the licensing of the software being built (it')
+        lines_before.append('# will not be in most cases) you must specify the correct value before using this')
+        lines_before.append('# recipe for anything other than initial testing/development!')
+        licenses = ['CLOSED']
+    lines_before.append('LICENSE = "%s"' % ' '.join(licenses))
+    lines_before.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n                    '.join(lic_files_chksum))
+    lines_before.append('')
+
+    classes = []
+
+    # FIXME This is kind of a hack, we probably ought to be using bitbake to do this
+    pn = None
+    pv = None
+    if outfile:
+        recipefn = os.path.splitext(os.path.basename(outfile))[0]
+        fnsplit = recipefn.split('_')
+        if len(fnsplit) > 1:
+            pn = fnsplit[0]
+            pv = fnsplit[1]
+        else:
+            pn = recipefn
+
+    if args.version:
+        pv = args.version
+
+    if args.name:
+        pn = args.name
+        if args.name.endswith('-native'):
+            if args.also_native:
+                logger.error('--also-native cannot be specified for a recipe named *-native (*-native denotes a recipe that is already only for native) - either remove the -native suffix from the name or drop --also-native')
+                sys.exit(1)
+            classes.append('native')
+        elif args.name.startswith('nativesdk-'):
+            if args.also_native:
+                logger.error('--also-native cannot be specified for a recipe named nativesdk-* (nativesdk-* denotes a recipe that is already only for nativesdk)')
+                sys.exit(1)
+            classes.append('nativesdk')
+
+    if pv and pv not in 'git svn hg'.split():
+        realpv = pv
+    else:
+        realpv = None
+
+    if srcuri and not realpv or not pn:
+        name_pn, name_pv = determine_from_url(srcuri)
+        if name_pn and not pn:
+            pn = name_pn
+        if name_pv and not realpv:
+            realpv = name_pv
+
+
+    if not srcuri:
+        lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
+    lines_before.append('SRC_URI = "%s"' % srcuri)
+    (md5value, sha256value) = checksums
+    if md5value:
+        lines_before.append('SRC_URI[md5sum] = "%s"' % md5value)
+    if sha256value:
+        lines_before.append('SRC_URI[sha256sum] = "%s"' % sha256value)
+    if srcuri and supports_srcrev(srcuri):
+        lines_before.append('')
+        lines_before.append('# Modify these as desired')
+        lines_before.append('PV = "%s+git${SRCPV}"' % (realpv or '1.0'))
+        lines_before.append('SRCREV = "%s"' % srcrev)
+    lines_before.append('')
+
+    if srcsubdir:
+        lines_before.append('S = "${WORKDIR}/%s"' % srcsubdir)
+        lines_before.append('')
+
+    if pkgarch:
+        lines_after.append('PACKAGE_ARCH = "%s"' % pkgarch)
+        lines_after.append('')
+
+    if args.binary:
+        lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
+        lines_after.append('')
+
+    # Find all plugins that want to register handlers
+    logger.debug('Loading recipe handlers')
+    raw_handlers = []
+    for plugin in plugins:
+        if hasattr(plugin, 'register_recipe_handlers'):
+            plugin.register_recipe_handlers(raw_handlers)
+    # Sort handlers by priority
+    handlers = []
+    for i, handler in enumerate(raw_handlers):
+        if isinstance(handler, tuple):
+            handlers.append((handler[0], handler[1], i))
+        else:
+            handlers.append((handler, 0, i))
+    handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
+    for handler, priority, _ in handlers:
+        logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+    handlers = [item[0] for item in handlers]
+
+    # Apply the handlers
+    handled = []
+    handled.append(('license', licvalues))
+
+    if args.binary:
+        classes.append('bin_package')
+        handled.append('buildsystem')
+
+    extravalues = {}
+    for handler in handlers:
+        handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+
+    extrafiles = extravalues.pop('extrafiles', {})
+
+    if not realpv:
+        realpv = extravalues.get('PV', None)
+        if realpv:
+            if not validate_pv(realpv):
+                realpv = None
+            else:
+                realpv = realpv.lower().split()[0]
+                if '_' in realpv:
+                    realpv = realpv.replace('_', '-')
+    if not pn:
+        pn = extravalues.get('PN', None)
+        if pn:
+            if pn.startswith('GNU '):
+                pn = pn[4:]
+            if ' ' in pn:
+                # Probably a descriptive identifier rather than a proper name
+                pn = None
+            else:
+                pn = pn.lower()
+                if '_' in pn:
+                    pn = pn.replace('_', '-')
+
+    if not outfile:
+        if not pn:
+            logger.error('Unable to determine short program name from source tree - please specify name with -N/--name or output file name with -o/--outfile')
+            # devtool looks for this specific exit code, so don't change it
+            sys.exit(15)
+        else:
+            if srcuri and srcuri.startswith(('git://', 'hg://', 'svn://')):
+                outfile = '%s_%s.bb' % (pn, srcuri.split(':', 1)[0])
+            elif realpv:
+                outfile = '%s_%s.bb' % (pn, realpv)
+            else:
+                outfile = '%s.bb' % pn
+            if outdir:
+                outfile = os.path.join(outdir, outfile)
+            # We need to check this again
+            if os.path.exists(outfile):
+                logger.error('Output file %s already exists' % outfile)
+                sys.exit(1)
+
+    # Move any extra files the plugins created to a directory next to the recipe
+    if extrafiles:
+        if outfile == '-':
+            extraoutdir = pn
+        else:
+            extraoutdir = os.path.join(os.path.dirname(outfile), pn)
+        bb.utils.mkdirhier(extraoutdir)
+        for destfn, extrafile in extrafiles.iteritems():
+            shutil.move(extrafile, os.path.join(extraoutdir, destfn))
+
+    lines = lines_before
+    lines_before = []
+    skipblank = True
+    for line in lines:
+        if skipblank:
+            skipblank = False
+            if not line:
+                continue
+        if line.startswith('S = '):
+            if realpv and pv not in 'git svn hg'.split():
+                line = line.replace(realpv, '${PV}')
+            if pn:
+                line = line.replace(pn, '${BPN}')
+            if line == 'S = "${WORKDIR}/${BPN}-${PV}"':
+                skipblank = True
+                continue
+        elif line.startswith('SRC_URI = '):
+            if realpv:
+                line = line.replace(realpv, '${PV}')
+        elif line.startswith('PV = '):
+            if realpv:
+                line = re.sub('"[^+]*\+', '"%s+' % realpv, line)
+        lines_before.append(line)
+
+    if args.also_native:
+        lines = lines_after
+        lines_after = []
+        bbclassextend = None
+        for line in lines:
+            if line.startswith('BBCLASSEXTEND ='):
+                splitval = line.split('"')
+                if len(splitval) > 1:
+                    bbclassextend = splitval[1].split()
+                    if not 'native' in bbclassextend:
+                        bbclassextend.insert(0, 'native')
+                        line = 'BBCLASSEXTEND = "%s"' % ' '.join(bbclassextend)
+            lines_after.append(line)
+        if not bbclassextend:
+            lines_after.append('BBCLASSEXTEND = "native"')
+
+    outlines = []
+    outlines.extend(lines_before)
+    if classes:
+        if outlines[-1] and not outlines[-1].startswith('#'):
+            outlines.append('')
+        outlines.append('inherit %s' % ' '.join(classes))
+        outlines.append('')
+    outlines.extend(lines_after)
+
+    if args.extract_to:
+        scriptutils.git_convert_standalone_clone(srctree)
+        if os.path.isdir(args.extract_to):
+            # If the directory exists we'll move the temp dir into it instead of
+            # its contents - of course, we could try to always move its contents
+            # but that is a pain if there are symlinks; the simplest solution is
+            # to just remove it first
+            os.rmdir(args.extract_to)
+        shutil.move(srctree, args.extract_to)
+        if tempsrc == srctree:
+            tempsrc = None
+        logger.info('Source extracted to %s' % args.extract_to)
+
+    if outfile == '-':
+        sys.stdout.write('\n'.join(outlines) + '\n')
+    else:
+        with open(outfile, 'w') as f:
+            f.write('\n'.join(outlines) + '\n')
+        logger.info('Recipe %s has been created; further editing may be required to make it fully functional' % outfile)
+
+    if tempsrc:
+        shutil.rmtree(tempsrc)
+
+    return 0
+
+def get_license_md5sums(d, static_only=False):
+    import bb.utils
+    md5sums = {}
+    if not static_only:
+        # Gather md5sums of license files in common license dir
+        commonlicdir = d.getVar('COMMON_LICENSE_DIR', True)
+        for fn in os.listdir(commonlicdir):
+            md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
+            md5sums[md5value] = fn
+    # The following were extracted from common values in various recipes
+    # (double checking the license against the license file itself, not just
+    # the LICENSE value in the recipe)
+    md5sums['94d55d512a9ba36caa9b7df079bae19f'] = 'GPLv2'
+    md5sums['b234ee4d69f5fce4486a80fdaf4a4263'] = 'GPLv2'
+    md5sums['59530bdf33659b29e73d4adb9f9f6552'] = 'GPLv2'
+    md5sums['0636e73ff0215e8d672dc4c32c317bb3'] = 'GPLv2'
+    md5sums['eb723b61539feef013de476e68b5c50a'] = 'GPLv2'
+    md5sums['751419260aa954499f7abaabaa882bbe'] = 'GPLv2'
+    md5sums['393a5ca445f6965873eca0259a17f833'] = 'GPLv2'
+    md5sums['12f884d2ae1ff87c09e5b7ccc2c4ca7e'] = 'GPLv2'
+    md5sums['8ca43cbc842c2336e835926c2166c28b'] = 'GPLv2'
+    md5sums['ebb5c50ab7cab4baeffba14977030c07'] = 'GPLv2'
+    md5sums['c93c0550bd3173f4504b2cbd8991e50b'] = 'GPLv2'
+    md5sums['9ac2e7cff1ddaf48b6eab6028f23ef88'] = 'GPLv2'
+    md5sums['4325afd396febcb659c36b49533135d4'] = 'GPLv2'
+    md5sums['18810669f13b87348459e611d31ab760'] = 'GPLv2'
+    md5sums['d7810fab7487fb0aad327b76f1be7cd7'] = 'GPLv2' # the Linux kernel's COPYING file
+    md5sums['bbb461211a33b134d42ed5ee802b37ff'] = 'LGPLv2.1'
+    md5sums['7fbc338309ac38fefcd64b04bb903e34'] = 'LGPLv2.1'
+    md5sums['4fbd65380cdd255951079008b364516c'] = 'LGPLv2.1'
+    md5sums['2d5025d4aa3495befef8f17206a5b0a1'] = 'LGPLv2.1'
+    md5sums['fbc093901857fcd118f065f900982c24'] = 'LGPLv2.1'
+    md5sums['a6f89e2100d9b6cdffcea4f398e37343'] = 'LGPLv2.1'
+    md5sums['d8045f3b8f929c1cb29a1e3fd737b499'] = 'LGPLv2.1'
+    md5sums['fad9b3332be894bab9bc501572864b29'] = 'LGPLv2.1'
+    md5sums['3bf50002aefd002f49e7bb854063f7e7'] = 'LGPLv2'
+    md5sums['9f604d8a4f8e74f4f5140845a21b6674'] = 'LGPLv2'
+    md5sums['5f30f0716dfdd0d91eb439ebec522ec2'] = 'LGPLv2'
+    md5sums['55ca817ccb7d5b5b66355690e9abc605'] = 'LGPLv2'
+    md5sums['252890d9eee26aab7b432e8b8a616475'] = 'LGPLv2'
+    md5sums['3214f080875748938ba060314b4f727d'] = 'LGPLv2'
+    md5sums['db979804f025cf55aabec7129cb671ed'] = 'LGPLv2'
+    md5sums['d32239bcb673463ab874e80d47fae504'] = 'GPLv3'
+    md5sums['f27defe1e96c2e1ecd4e0c9be8967949'] = 'GPLv3'
+    md5sums['6a6a8e020838b23406c81b19c1d46df6'] = 'LGPLv3'
+    md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
+    md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
+    md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
+    return md5sums
+
+def crunch_license(licfile):
+    '''
+    Remove non-material text from a license file and then check
+    its md5sum against a known list. This works well for licenses
+    which contain a copyright statement, but is also a useful way
+    to handle people's insistence upon reformatting the license text
+    slightly (with no material difference to the text of the
+    license).
+    '''
+
+    import oe.utils
+
+    # Note: these are carefully constructed!
+    license_title_re = re.compile('^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
+    license_statement_re = re.compile('^This (project|software) is( free software)? released under the .{1,10} [Ll]icen[sc]e:?$')
+    copyright_re = re.compile('^(#+)? *Copyright .*$')
+
+    crunched_md5sums = {}
+    # The following two were gleaned from the "forever" npm package
+    crunched_md5sums['0a97f8e4cbaf889d6fa51f84b89a79f6'] = 'ISC'
+    crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
+    # https://github.com/vasi/pixz/blob/master/LICENSE
+    crunched_md5sums['2f03392b40bbe663597b5bd3cc5ebdb9'] = 'BSD-2-Clause'
+    # https://github.com/waffle-gl/waffle/blob/master/LICENSE.txt
+    crunched_md5sums['e72e5dfef0b1a4ca8a3d26a60587db66'] = 'BSD-2-Clause'
+    # https://github.com/spigwitmer/fakeds1963s/blob/master/LICENSE
+    crunched_md5sums['8be76ac6d191671f347ee4916baa637e'] = 'GPLv2'
+    # https://github.com/datto/dattobd/blob/master/COPYING
+    # http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/GPLv2.TXT
+    crunched_md5sums['1d65c5ad4bf6489f85f4812bf08ae73d'] = 'GPLv2'
+    # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+    # http://git.neil.brown.name/?p=mdadm.git;a=blob;f=COPYING;h=d159169d1050894d3ea3b98e1c965c4058208fe1;hb=HEAD
+    crunched_md5sums['fb530f66a7a89ce920f0e912b5b66d4b'] = 'GPLv2'
+    # https://github.com/gkos/nrf24/blob/master/COPYING
+    crunched_md5sums['7b6aaa4daeafdfa6ed5443fd2684581b'] = 'GPLv2'
+    # https://github.com/josch09/resetusb/blob/master/COPYING
+    crunched_md5sums['8b8ac1d631a4d220342e83bcf1a1fbc3'] = 'GPLv3'
+    # https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv2.1
+    crunched_md5sums['2ea316ed973ae176e502e2297b574bb3'] = 'LGPLv2.1'
+    # unixODBC-2.3.4 COPYING
+    crunched_md5sums['1daebd9491d1e8426900b4fa5a422814'] = 'LGPLv2.1'
+    # https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
+    crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
+    lictext = []
+    with open(licfile, 'r') as f:
+        for line in f:
+            # Drop opening statements
+            if copyright_re.match(line):
+                continue
+            elif license_title_re.match(line):
+                continue
+            elif license_statement_re.match(line):
+                continue
+            # Squash spaces, and replace smart quotes, double quotes
+            # and backticks with single quotes
+            line = oe.utils.squashspaces(line.strip()).decode("utf-8")
+            line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c","'").replace(u"\u201d", "'").replace('"', '\'').replace('`', '\'')
+            if line:
+                lictext.append(line)
+
+    m = hashlib.md5()
+    try:
+        m.update(' '.join(lictext))
+        md5val = m.hexdigest()
+    except UnicodeEncodeError:
+        md5val = None
+        lictext = ''
+    license = crunched_md5sums.get(md5val, None)
+    return license, md5val, lictext
+
+def guess_license(srctree):
+    import bb
+    md5sums = get_license_md5sums(tinfoil.config_data)
+
+    licenses = []
+    licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*']
+    licfiles = []
+    for root, dirs, files in os.walk(srctree):
+        for fn in files:
+            for spec in licspecs:
+                if fnmatch.fnmatch(fn, spec):
+                    fullpath = os.path.join(root, fn)
+                    if not fullpath in licfiles:
+                        licfiles.append(fullpath)
+    for licfile in licfiles:
+        md5value = bb.utils.md5_file(licfile)
+        license = md5sums.get(md5value, None)
+        if not license:
+            license, crunched_md5, lictext = crunch_license(licfile)
+            if not license:
+                license = 'Unknown'
+        licenses.append((license, os.path.relpath(licfile, srctree), md5value))
+
+    # FIXME should we grab at least one source file with a license header and add that too?
+
+    return licenses
+
+def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn='${PN}'):
+    """
+    Given a list of (license, path, md5sum) tuples as returned by
+    guess_license() and a dict mapping package names to paths, write out a
+    set of package-specific LICENSE values.
+    """
+    pkglicenses = {pn: []}
+    for license, licpath, _ in licvalues:
+        for pkgname, pkgpath in packages.iteritems():
+            if licpath.startswith(pkgpath + '/'):
+                if pkgname in pkglicenses:
+                    pkglicenses[pkgname].append(license)
+                else:
+                    pkglicenses[pkgname] = [license]
+                break
+        else:
+            # Accumulate on the main package
+            pkglicenses[pn].append(license)
+    outlicenses = {}
+    for pkgname in packages:
+        license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
+        if license == 'Unknown' and fallback_licenses and pkgname in fallback_licenses:
+            license = fallback_licenses[pkgname]
+        outlines.append('LICENSE_%s = "%s"' % (pkgname, license))
+        outlicenses[pkgname] = license.split()
+    return outlicenses
+
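[Editorial note - not part of the patch] A minimal sketch of calling split_pkg_licenses(); the package name, paths and the MIT md5sum placeholder are hypothetical (the GPLv2 md5sum is the well-known one from the table above):

# Illustrative only - hypothetical inputs.
from recipetool.create import split_pkg_licenses

licvalues = [('GPLv2', 'plugins/COPYING', 'b234ee4d69f5fce4486a80fdaf4a4263'),
             ('MIT', 'COPYING.MIT', '00000000000000000000000000000000')]
packages = {'example-plugins': 'plugins'}
outlines = []
split_pkg_licenses(licvalues, packages, outlines)
# outlines -> ['LICENSE_example-plugins = "GPLv2"']
# The MIT entry is not under any listed package path, so it is accumulated
# against the main package (pn, '${PN}' by default) rather than emitted here.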
+def read_pkgconfig_provides(d):
+    pkgdatadir = d.getVar('PKGDATA_DIR', True)
+    pkgmap = {}
+    for fn in glob.glob(os.path.join(pkgdatadir, 'shlibs2', '*.pclist')):
+        with open(fn, 'r') as f:
+            for line in f:
+                pkgmap[os.path.basename(line.rstrip())] = os.path.splitext(os.path.basename(fn))[0]
+    recipemap = {}
+    for pc, pkg in pkgmap.iteritems():
+        pkgdatafile = os.path.join(pkgdatadir, 'runtime', pkg)
+        if os.path.exists(pkgdatafile):
+            with open(pkgdatafile, 'r') as f:
+                for line in f:
+                    if line.startswith('PN: '):
+                        recipemap[pc] = line.split(':', 1)[1].strip()
+    return recipemap
+
+def convert_pkginfo(pkginfofile):
+    values = {}
+    with open(pkginfofile, 'r') as f:
+        indesc = False
+        for line in f:
+            if indesc:
+                if line.strip():
+                    values['DESCRIPTION'] += ' ' + line.strip()
+                else:
+                    indesc = False
+            else:
+                splitline = line.split(': ', 1)
+                if len(splitline) < 2:
+                    continue
+                key = splitline[0]
+                value = splitline[1].strip()
+                if key == 'LICENSE':
+                    for dep in value.split(','):
+                        dep = dep.split()[0]
+                        mapped = depmap.get(dep, '')
+                        if mapped:
+                            depends.append(mapped)
+                elif key == 'License':
+                    values['LICENSE'] = value
+                elif key == 'Summary':
+                    values['SUMMARY'] = value
+                elif key == 'Description':
+                    values['DESCRIPTION'] = value
+                    indesc = True
+    return values
+
+def convert_debian(debpath):
+    # FIXME extend this mapping - perhaps use distro_alias.inc?
+    depmap = {'libz-dev': 'zlib'}
+
+    values = {}
+    depends = []
+    with open(os.path.join(debpath, 'control')) as f:
+        indesc = False
+        for line in f:
+            if indesc:
+                if line.strip():
+                    if line.startswith(' This package contains'):
+                        indesc = False
+                    else:
+                        values['DESCRIPTION'] += ' ' + line.strip()
+                else:
+                    indesc = False
+            else:
+                splitline = line.split(':', 1)
+                if len(splitline) < 2:
+                    continue
+                key = splitline[0]
+                value = splitline[1].strip()
+                if key == 'Build-Depends':
+                    for dep in value.split(','):
+                        dep = dep.split()[0]
+                        mapped = depmap.get(dep, '')
+                        if mapped:
+                            depends.append(mapped)
+                elif key == 'Section':
+                    values['SECTION'] = value
+                elif key == 'Description':
+                    values['SUMMARY'] = value
+                    indesc = True
+
+    if depends:
+        values['DEPENDS'] = ' '.join(depends)
+
+    return values
+
+
+def register_commands(subparsers):
+    parser_create = subparsers.add_parser('create',
+                                          help='Create a new recipe',
+                                          description='Creates a new recipe from a source tree')
+    parser_create.add_argument('source', help='Path or URL to source')
+    parser_create.add_argument('-o', '--outfile', help='Specify filename for recipe to create')
+    parser_create.add_argument('-m', '--machine', help='Make recipe machine-specific as opposed to architecture-specific', action='store_true')
+    parser_create.add_argument('-x', '--extract-to', metavar='EXTRACTPATH', help='Assuming source is a URL, fetch it and extract it to the directory specified as %(metavar)s')
+    parser_create.add_argument('-N', '--name', help='Name to use within recipe (PN)')
+    parser_create.add_argument('-V', '--version', help='Version to use within recipe (PV)')
+    parser_create.add_argument('-b', '--binary', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure)', action='store_true')
+    parser_create.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
+    parser_create.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
+    parser_create.set_defaults(func=create_recipe)
+
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
new file mode 100644
index 0000000..f84ec3d
--- /dev/null
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -0,0 +1,859 @@
+# Recipe creation tool - create command build system handlers
+#
+# Copyright (C) 2014-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import logging
+import glob
+from recipetool.create import RecipeHandler, validate_pv
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+plugins = None
+
+def plugin_init(pluginlist):
+ # Take a reference to the list so we can use it later
+ global plugins
+ plugins = pluginlist
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class CmakeRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if RecipeHandler.checkfiles(srctree, ['CMakeLists.txt']):
+ classes.append('cmake')
+ values = CmakeRecipeHandler.extract_cmake_deps(lines_before, srctree, extravalues)
+ classes.extend(values.pop('inherit', '').split())
+ for var, value in values.iteritems():
+ lines_before.append('%s = "%s"' % (var, value))
+ lines_after.append('# Specify any options you want to pass to cmake using EXTRA_OECMAKE:')
+ lines_after.append('EXTRA_OECMAKE = ""')
+ lines_after.append('')
+ handled.append('buildsystem')
+ return True
+ return False
+
+ @staticmethod
+ def extract_cmake_deps(outlines, srctree, extravalues, cmakelistsfile=None):
+ # Find all plugins that want to register handlers
+ logger.debug('Loading cmake handlers')
+ handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_cmake_handlers'):
+ plugin.register_cmake_handlers(handlers)
+
+ values = {}
+ inherits = []
+
+ if cmakelistsfile:
+ srcfiles = [cmakelistsfile]
+ else:
+ srcfiles = RecipeHandler.checkfiles(srctree, ['CMakeLists.txt'])
+
+ # Note that some of these are non-standard, but probably better to
+ # be able to map them anyway if we see them
+ cmake_pkgmap = {'alsa': 'alsa-lib',
+ 'aspell': 'aspell',
+ 'atk': 'atk',
+ 'bison': 'bison-native',
+ 'boost': 'boost',
+ 'bzip2': 'bzip2',
+ 'cairo': 'cairo',
+ 'cups': 'cups',
+ 'curl': 'curl',
+ 'curses': 'ncurses',
+ 'cvs': 'cvs',
+ 'drm': 'libdrm',
+ 'dbus': 'dbus',
+ 'dbusglib': 'dbus-glib',
+ 'egl': 'virtual/egl',
+ 'expat': 'expat',
+ 'flex': 'flex-native',
+ 'fontconfig': 'fontconfig',
+ 'freetype': 'freetype',
+ 'gettext': '',
+ 'git': '',
+ 'gio': 'glib-2.0',
+ 'giounix': 'glib-2.0',
+ 'glew': 'glew',
+ 'glib': 'glib-2.0',
+ 'glib2': 'glib-2.0',
+ 'glu': 'libglu',
+ 'glut': 'freeglut',
+ 'gobject': 'glib-2.0',
+ 'gperf': 'gperf-native',
+ 'gnutls': 'gnutls',
+ 'gtk2': 'gtk+',
+ 'gtk3': 'gtk+3',
+ 'gtk': 'gtk+3',
+ 'harfbuzz': 'harfbuzz',
+ 'icu': 'icu',
+ 'intl': 'virtual/libintl',
+ 'jpeg': 'jpeg',
+ 'libarchive': 'libarchive',
+ 'libiconv': 'virtual/libiconv',
+ 'liblzma': 'xz',
+ 'libxml2': 'libxml2',
+ 'libxslt': 'libxslt',
+ 'opengl': 'virtual/libgl',
+ 'openmp': '',
+ 'openssl': 'openssl',
+ 'pango': 'pango',
+ 'perl': '',
+ 'perllibs': '',
+ 'pkgconfig': '',
+ 'png': 'libpng',
+ 'pthread': '',
+ 'pythoninterp': '',
+ 'pythonlibs': '',
+ 'ruby': 'ruby-native',
+ 'sdl': 'libsdl',
+ 'sdl2': 'libsdl2',
+ 'subversion': 'subversion-native',
+ 'swig': 'swig-native',
+ 'tcl': 'tcl-native',
+ 'threads': '',
+ 'tiff': 'tiff',
+ 'wget': 'wget',
+ 'x11': 'libx11',
+ 'xcb': 'libxcb',
+ 'xext': 'libxext',
+ 'xfixes': 'libxfixes',
+ 'zlib': 'zlib',
+ }
+
+ pcdeps = []
+ libdeps = []
+ deps = []
+ unmappedpkgs = []
+
+ proj_re = re.compile('project\s*\(([^)]*)\)', re.IGNORECASE)
+ pkgcm_re = re.compile('pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
+ pkgsm_re = re.compile('pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
+ findpackage_re = re.compile('find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
+ findlibrary_re = re.compile('find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
+ checklib_re = re.compile('check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
+ include_re = re.compile('include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ subdir_re = re.compile('add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ dep_re = re.compile('([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
+
+ def find_cmake_package(pkg):
+ RecipeHandler.load_devel_filemap(tinfoil.config_data)
+ for fn, pn in RecipeHandler.recipecmakefilemap.iteritems():
+ splitname = fn.split('/')
+ if len(splitname) > 1:
+ if splitname[0].lower().startswith(pkg.lower()):
+ if splitname[1] == '%s-config.cmake' % pkg.lower() or splitname[1] == '%sConfig.cmake' % pkg or splitname[1] == 'Find%s.cmake' % pkg:
+ return pn
+ return None
+
+ def interpret_value(value):
+ return value.strip('"')
+
+ def parse_cmake_file(fn, paths=None):
+ searchpaths = (paths or []) + [os.path.dirname(fn)]
+ logger.debug('Parsing file %s' % fn)
+ with open(fn, 'r') as f:
+ for line in f:
+ line = line.strip()
+ for handler in handlers:
+ if handler.process_line(srctree, fn, line, libdeps, pcdeps, deps, outlines, inherits, values):
+ continue
+ res = include_re.match(line)
+ if res:
+ includefn = bb.utils.which(':'.join(searchpaths), res.group(1))
+ if includefn:
+ parse_cmake_file(includefn, searchpaths)
+ else:
+ logger.debug('Unable to recurse into include file %s' % res.group(1))
+ continue
+ res = subdir_re.match(line)
+ if res:
+ subdirfn = os.path.join(os.path.dirname(fn), res.group(1), 'CMakeLists.txt')
+ if os.path.exists(subdirfn):
+ parse_cmake_file(subdirfn, searchpaths)
+ else:
+ logger.debug('Unable to recurse into subdirectory file %s' % subdirfn)
+ continue
+ res = proj_re.match(line)
+ if res:
+ extravalues['PN'] = interpret_value(res.group(1).split()[0])
+ continue
+ res = pkgcm_re.match(line)
+ if res:
+ res = dep_re.findall(res.group(2))
+ if res:
+ pcdeps.extend([interpret_value(x[0]) for x in res])
+ inherits.append('pkgconfig')
+ continue
+ res = pkgsm_re.match(line)
+ if res:
+ res = dep_re.findall(res.group(2))
+ if res:
+ # Note: appending a tuple here!
+ item = tuple((interpret_value(x[0]) for x in res))
+ if len(item) == 1:
+ item = item[0]
+ pcdeps.append(item)
+ inherits.append('pkgconfig')
+ continue
+ res = findpackage_re.match(line)
+ if res:
+ origpkg = res.group(1)
+ pkg = interpret_value(origpkg)
+ found = False
+ for handler in handlers:
+ if handler.process_findpackage(srctree, fn, pkg, deps, outlines, inherits, values):
+ logger.debug('Mapped CMake package %s via handler %s' % (pkg, handler.__class__.__name__))
+ found = True
+ break
+ if found:
+ continue
+ elif pkg == 'Gettext':
+ inherits.append('gettext')
+ elif pkg == 'Perl':
+ inherits.append('perlnative')
+ elif pkg == 'PkgConfig':
+ inherits.append('pkgconfig')
+ elif pkg == 'PythonInterp':
+ inherits.append('pythonnative')
+ elif pkg == 'PythonLibs':
+ inherits.append('python-dir')
+ else:
+ # Try to map via looking at installed CMake packages in pkgdata
+ dep = find_cmake_package(pkg)
+ if dep:
+ logger.debug('Mapped CMake package %s to recipe %s via pkgdata' % (pkg, dep))
+ deps.append(dep)
+ else:
+ dep = cmake_pkgmap.get(pkg.lower(), None)
+ if dep:
+ logger.debug('Mapped CMake package %s to recipe %s via internal list' % (pkg, dep))
+ deps.append(dep)
+ elif dep is None:
+ unmappedpkgs.append(origpkg)
+ continue
+ res = checklib_re.match(line)
+ if res:
+ lib = interpret_value(res.group(1))
+ if not lib.startswith('$'):
+ libdeps.append(lib)
+ res = findlibrary_re.match(line)
+ if res:
+ libs = res.group(2).split()
+ for lib in libs:
+ if lib in ['HINTS', 'PATHS', 'PATH_SUFFIXES', 'DOC', 'NAMES_PER_DIR'] or lib.startswith(('NO_', 'CMAKE_', 'ONLY_CMAKE_')):
+ break
+ lib = interpret_value(lib)
+ if not lib.startswith('$'):
+ libdeps.append(lib)
+ if line.lower().startswith('useswig'):
+ deps.append('swig-native')
+ continue
+
+ parse_cmake_file(srcfiles[0])
+
+ if unmappedpkgs:
+ outlines.append('# NOTE: unable to map the following CMake package dependencies: %s' % ' '.join(list(set(unmappedpkgs))))
+
+ RecipeHandler.handle_depends(libdeps, pcdeps, deps, outlines, values, tinfoil.config_data)
+
+ for handler in handlers:
+ handler.post_process(srctree, libdeps, pcdeps, deps, outlines, inherits, values)
+
+ if inherits:
+ values['inherit'] = ' '.join(list(set(inherits)))
+
+ return values
+
+
+class CmakeExtensionHandler(object):
+ '''Base class for CMake extension handlers'''
+ def process_line(self, srctree, fn, line, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+        Handle a line parsed out of a CMake file.
+ Return True if you've completely handled the passed in line, otherwise return False.
+ '''
+ return False
+
+ def process_findpackage(self, srctree, fn, pkg, deps, outlines, inherits, values):
+ '''
+ Handle a find_package package parsed out of a CMake file.
+ Return True if you've completely handled the passed in package, otherwise return False.
+ '''
+ return False
+
+    def post_process(self, srctree, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Apply any desired post-processing on the output
+ '''
+ return
+
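[Editorial note - not part of the patch] A minimal sketch of a plugin-provided CMake extension handler; the class, package and recipe names are hypothetical. A recipetool plugin would expose it through register_cmake_handlers(), which extract_cmake_deps() above collects:

# Illustrative only - hypothetical handler, package and recipe names.
from recipetool.create_buildsys import CmakeExtensionHandler

class ExampleCmakeHandler(CmakeExtensionHandler):
    def process_findpackage(self, srctree, fn, pkg, deps, outlines, inherits, values):
        # Map find_package(Example) onto a (hypothetical) libexample recipe
        if pkg == 'Example':
            deps.append('libexample')
            return True
        return False

def register_cmake_handlers(handlers):
    handlers.append(ExampleCmakeHandler())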
+
+
+class SconsRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if RecipeHandler.checkfiles(srctree, ['SConstruct', 'Sconstruct', 'sconstruct']):
+ classes.append('scons')
+ lines_after.append('# Specify any options you want to pass to scons using EXTRA_OESCONS:')
+ lines_after.append('EXTRA_OESCONS = ""')
+ lines_after.append('')
+ handled.append('buildsystem')
+ return True
+ return False
+
+
+class QmakeRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if RecipeHandler.checkfiles(srctree, ['*.pro']):
+ classes.append('qmake2')
+ handled.append('buildsystem')
+ return True
+ return False
+
+
+class AutotoolsRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ autoconf = False
+ if RecipeHandler.checkfiles(srctree, ['configure.ac', 'configure.in']):
+ autoconf = True
+ values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, extravalues)
+ classes.extend(values.pop('inherit', '').split())
+ for var, value in values.iteritems():
+ lines_before.append('%s = "%s"' % (var, value))
+ else:
+ conffile = RecipeHandler.checkfiles(srctree, ['configure'])
+ if conffile:
+ # Check if this is just a pre-generated autoconf configure script
+ with open(conffile[0], 'r') as f:
+ for i in range(1, 10):
+ if 'Generated by GNU Autoconf' in f.readline():
+ autoconf = True
+ break
+
+ if autoconf and not ('PV' in extravalues and 'PN' in extravalues):
+ # Last resort
+ conffile = RecipeHandler.checkfiles(srctree, ['configure'])
+ if conffile:
+ with open(conffile[0], 'r') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith('VERSION=') or line.startswith('PACKAGE_VERSION='):
+ pv = line.split('=')[1].strip('"\'')
+ if pv and not 'PV' in extravalues and validate_pv(pv):
+ extravalues['PV'] = pv
+ elif line.startswith('PACKAGE_NAME=') or line.startswith('PACKAGE='):
+ pn = line.split('=')[1].strip('"\'')
+ if pn and not 'PN' in extravalues:
+ extravalues['PN'] = pn
+
+ if autoconf:
+ lines_before.append('')
+ lines_before.append('# NOTE: if this software is not capable of being built in a separate build directory')
+ lines_before.append('# from the source, you should replace autotools with autotools-brokensep in the')
+ lines_before.append('# inherit line')
+ classes.append('autotools')
+ lines_after.append('# Specify any options you want to pass to the configure script using EXTRA_OECONF:')
+ lines_after.append('EXTRA_OECONF = ""')
+ lines_after.append('')
+ handled.append('buildsystem')
+ return True
+
+ return False
+
+ @staticmethod
+ def extract_autotools_deps(outlines, srctree, extravalues=None, acfile=None):
+ import shlex
+
+ # Find all plugins that want to register handlers
+ logger.debug('Loading autotools handlers')
+ handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_autotools_handlers'):
+ plugin.register_autotools_handlers(handlers)
+
+ values = {}
+ inherits = []
+
+ # Hardcoded map, we also use a dynamic one based on what's in the sysroot
+ progmap = {'flex': 'flex-native',
+ 'bison': 'bison-native',
+ 'm4': 'm4-native',
+ 'tar': 'tar-native',
+ 'ar': 'binutils-native',
+ 'ranlib': 'binutils-native',
+ 'ld': 'binutils-native',
+ 'strip': 'binutils-native',
+ 'libtool': '',
+ 'autoconf': '',
+ 'autoheader': '',
+ 'automake': '',
+ 'uname': '',
+ 'rm': '',
+ 'cp': '',
+ 'mv': '',
+ 'find': '',
+ 'awk': '',
+ 'sed': '',
+ }
+ progclassmap = {'gconftool-2': 'gconf',
+ 'pkg-config': 'pkgconfig',
+ 'python': 'pythonnative',
+ 'python3': 'python3native',
+ 'perl': 'perlnative',
+ 'makeinfo': 'texinfo',
+ }
+
+ pkg_re = re.compile('PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ pkgce_re = re.compile('PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
+ lib_re = re.compile('AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
+ libx_re = re.compile('AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
+ progs_re = re.compile('_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?')
+ ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
+ am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
+ define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+
+ defines = {}
+ def subst_defines(value):
+ newvalue = value
+ for define, defval in defines.iteritems():
+ newvalue = newvalue.replace(define, defval)
+ if newvalue != value:
+ return subst_defines(newvalue)
+ return value
+
+ def process_value(value):
+ value = value.replace('[', '').replace(']', '')
+ if value.startswith('m4_esyscmd(') or value.startswith('m4_esyscmd_s('):
+ cmd = subst_defines(value[value.index('(')+1:-1])
+ try:
+ if '|' in cmd:
+ cmd = 'set -o pipefail; ' + cmd
+ stdout, _ = bb.process.run(cmd, cwd=srctree, shell=True)
+ ret = stdout.rstrip()
+ except bb.process.ExecutionError as e:
+ ret = ''
+ elif value.startswith('m4_'):
+ return None
+            else:
+                ret = subst_defines(value)
+ if ret:
+ ret = ret.strip('"\'')
+ return ret
+
+ # Since a configure.ac file is essentially a program, this is only ever going to be
+ # a hack unfortunately; but it ought to be enough of an approximation
+ if acfile:
+ srcfiles = [acfile]
+ else:
+ srcfiles = RecipeHandler.checkfiles(srctree, ['acinclude.m4', 'configure.ac', 'configure.in'])
+
+ pcdeps = []
+ libdeps = []
+ deps = []
+ unmapped = []
+
+ RecipeHandler.load_binmap(tinfoil.config_data)
+
+ def process_macro(keyword, value):
+ for handler in handlers:
+ if handler.process_macro(srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
+ return
+ if keyword == 'PKG_CHECK_MODULES':
+ res = pkg_re.search(value)
+ if res:
+ res = dep_re.findall(res.group(1))
+ if res:
+ pcdeps.extend([x[0] for x in res])
+ inherits.append('pkgconfig')
+ elif keyword == 'PKG_CHECK_EXISTS':
+ res = pkgce_re.search(value)
+ if res:
+ res = dep_re.findall(res.group(1))
+ if res:
+ pcdeps.extend([x[0] for x in res])
+ inherits.append('pkgconfig')
+ elif keyword in ('AM_GNU_GETTEXT', 'AM_GLIB_GNU_GETTEXT', 'GETTEXT_PACKAGE'):
+ inherits.append('gettext')
+ elif keyword in ('AC_PROG_INTLTOOL', 'IT_PROG_INTLTOOL'):
+ deps.append('intltool-native')
+ elif keyword == 'AM_PATH_GLIB_2_0':
+ deps.append('glib-2.0')
+ elif keyword in ('AC_CHECK_PROG', 'AC_PATH_PROG', 'AX_WITH_PROG'):
+ res = progs_re.search(value)
+ if res:
+ for prog in shlex.split(res.group(1)):
+ prog = prog.split()[0]
+ for handler in handlers:
+ if handler.process_prog(srctree, keyword, value, prog, deps, outlines, inherits, values):
+ return
+ progclass = progclassmap.get(prog, None)
+ if progclass:
+ inherits.append(progclass)
+ else:
+ progdep = RecipeHandler.recipebinmap.get(prog, None)
+ if not progdep:
+ progdep = progmap.get(prog, None)
+ if progdep:
+ deps.append(progdep)
+ elif progdep is None:
+ if not prog.startswith('$'):
+ unmapped.append(prog)
+ elif keyword == 'AC_CHECK_LIB':
+ res = lib_re.search(value)
+ if res:
+ lib = res.group(1)
+ if not lib.startswith('$'):
+ libdeps.append(lib)
+ elif keyword == 'AX_CHECK_LIBRARY':
+ res = libx_re.search(value)
+ if res:
+ lib = res.group(2)
+ if not lib.startswith('$'):
+ header = res.group(1)
+ libdeps.append((lib, header))
+ elif keyword == 'AC_PATH_X':
+ deps.append('libx11')
+ elif keyword in ('AX_BOOST', 'BOOST_REQUIRE'):
+ deps.append('boost')
+ elif keyword in ('AC_PROG_LEX', 'AM_PROG_LEX', 'AX_PROG_FLEX'):
+ deps.append('flex-native')
+ elif keyword in ('AC_PROG_YACC', 'AX_PROG_BISON'):
+ deps.append('bison-native')
+ elif keyword == 'AX_CHECK_ZLIB':
+ deps.append('zlib')
+ elif keyword in ('AX_CHECK_OPENSSL', 'AX_LIB_CRYPTO'):
+ deps.append('openssl')
+ elif keyword == 'AX_LIB_CURL':
+ deps.append('curl')
+ elif keyword == 'AX_LIB_BEECRYPT':
+ deps.append('beecrypt')
+ elif keyword == 'AX_LIB_EXPAT':
+ deps.append('expat')
+ elif keyword == 'AX_LIB_GCRYPT':
+ deps.append('libgcrypt')
+ elif keyword == 'AX_LIB_NETTLE':
+ deps.append('nettle')
+ elif keyword == 'AX_LIB_READLINE':
+ deps.append('readline')
+ elif keyword == 'AX_LIB_SQLITE3':
+ deps.append('sqlite3')
+ elif keyword == 'AX_LIB_TAGLIB':
+ deps.append('taglib')
+ elif keyword == 'AX_PKG_SWIG':
+ deps.append('swig')
+ elif keyword == 'AX_PROG_XSLTPROC':
+ deps.append('libxslt-native')
+ elif keyword == 'AX_WITH_CURSES':
+ deps.append('ncurses')
+ elif keyword == 'AX_PATH_BDB':
+ deps.append('db')
+ elif keyword == 'AX_PATH_LIB_PCRE':
+ deps.append('libpcre')
+ elif keyword == 'AC_INIT':
+ if extravalues is not None:
+ res = ac_init_re.match(value)
+ if res:
+ extravalues['PN'] = process_value(res.group(1))
+ pv = process_value(res.group(2))
+ if validate_pv(pv):
+ extravalues['PV'] = pv
+ elif keyword == 'AM_INIT_AUTOMAKE':
+ if extravalues is not None:
+ if 'PN' not in extravalues:
+ res = am_init_re.match(value)
+ if res:
+ if res.group(1) != 'AC_PACKAGE_NAME':
+ extravalues['PN'] = process_value(res.group(1))
+ pv = process_value(res.group(2))
+ if validate_pv(pv):
+ extravalues['PV'] = pv
+ elif keyword == 'define(':
+ res = define_re.match(value)
+ if res:
+ key = res.group(2).strip('[]')
+ value = process_value(res.group(3))
+ if value is not None:
+ defines[key] = value
+
+ keywords = ['PKG_CHECK_MODULES',
+ 'PKG_CHECK_EXISTS',
+ 'AM_GNU_GETTEXT',
+ 'AM_GLIB_GNU_GETTEXT',
+ 'GETTEXT_PACKAGE',
+ 'AC_PROG_INTLTOOL',
+ 'IT_PROG_INTLTOOL',
+ 'AM_PATH_GLIB_2_0',
+ 'AC_CHECK_PROG',
+ 'AC_PATH_PROG',
+ 'AX_WITH_PROG',
+ 'AC_CHECK_LIB',
+ 'AX_CHECK_LIBRARY',
+ 'AC_PATH_X',
+ 'AX_BOOST',
+ 'BOOST_REQUIRE',
+ 'AC_PROG_LEX',
+ 'AM_PROG_LEX',
+ 'AX_PROG_FLEX',
+ 'AC_PROG_YACC',
+ 'AX_PROG_BISON',
+ 'AX_CHECK_ZLIB',
+ 'AX_CHECK_OPENSSL',
+ 'AX_LIB_CRYPTO',
+ 'AX_LIB_CURL',
+ 'AX_LIB_BEECRYPT',
+ 'AX_LIB_EXPAT',
+ 'AX_LIB_GCRYPT',
+ 'AX_LIB_NETTLE',
+                    'AX_LIB_READLINE',
+ 'AX_LIB_SQLITE3',
+ 'AX_LIB_TAGLIB',
+ 'AX_PKG_SWIG',
+ 'AX_PROG_XSLTPROC',
+ 'AX_WITH_CURSES',
+ 'AX_PATH_BDB',
+ 'AX_PATH_LIB_PCRE',
+ 'AC_INIT',
+ 'AM_INIT_AUTOMAKE',
+ 'define(',
+ ]
+
+ for handler in handlers:
+ handler.extend_keywords(keywords)
+
+ for srcfile in srcfiles:
+ nesting = 0
+ in_keyword = ''
+ partial = ''
+ with open(srcfile, 'r') as f:
+ for line in f:
+ if in_keyword:
+ partial += ' ' + line.strip()
+ if partial.endswith('\\'):
+ partial = partial[:-1]
+ nesting = nesting + line.count('(') - line.count(')')
+ if nesting == 0:
+ process_macro(in_keyword, partial)
+ partial = ''
+ in_keyword = ''
+ else:
+ for keyword in keywords:
+ if keyword in line:
+ nesting = line.count('(') - line.count(')')
+ if nesting > 0:
+ partial = line.strip()
+ if partial.endswith('\\'):
+ partial = partial[:-1]
+ in_keyword = keyword
+ else:
+ process_macro(keyword, line.strip())
+ break
+
+ if in_keyword:
+ process_macro(in_keyword, partial)
+
+ if extravalues:
+ for k,v in extravalues.items():
+ if v:
+ if v.startswith('$') or v.startswith('@') or v.startswith('%'):
+ del extravalues[k]
+ else:
+ extravalues[k] = v.strip('"\'').rstrip('()')
+
+ if unmapped:
+ outlines.append('# NOTE: the following prog dependencies are unknown, ignoring: %s' % ' '.join(list(set(unmapped))))
+
+ RecipeHandler.handle_depends(libdeps, pcdeps, deps, outlines, values, tinfoil.config_data)
+
+ for handler in handlers:
+ handler.post_process(srctree, libdeps, pcdeps, deps, outlines, inherits, values)
+
+ if inherits:
+ values['inherit'] = ' '.join(list(set(inherits)))
+
+ return values
+
+
+class AutotoolsExtensionHandler(object):
+ '''Base class for Autotools extension handlers'''
+ def process_macro(self, srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Handle a macro parsed out of an autotools file. Note that if you want this to be called
+ for any macro other than the ones AutotoolsRecipeHandler already looks for, you'll need
+ to add it to the keywords list in extend_keywords().
+ Return True if you've completely handled the passed in macro, otherwise return False.
+ '''
+ return False
+
+ def extend_keywords(self, keywords):
+ '''Adds keywords to be recognised by the parser (so that you get a call to process_macro)'''
+ return
+
+ def process_prog(self, srctree, keyword, value, prog, deps, outlines, inherits, values):
+ '''
+ Handle an AC_PATH_PROG, AC_CHECK_PROG etc. line
+ Return True if you've completely handled the passed in macro, otherwise return False.
+ '''
+ return False
+
+    def post_process(self, srctree, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Apply any desired post-processing on the output
+ '''
+ return
+
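[Editorial note - not part of the patch] A minimal sketch of a plugin-provided autotools extension handler along the same lines; the macro and recipe names are hypothetical. A plugin would expose it through register_autotools_handlers(), which extract_autotools_deps() above collects:

# Illustrative only - hypothetical macro and recipe names.
from recipetool.create_buildsys import AutotoolsExtensionHandler

class ExampleAutotoolsHandler(AutotoolsExtensionHandler):
    def extend_keywords(self, keywords):
        # Ensure the parser calls process_macro() for our macro
        keywords.append('AX_LIB_EXAMPLE')

    def process_macro(self, srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
        if keyword == 'AX_LIB_EXAMPLE':
            deps.append('libexample')
            return True
        return False

def register_autotools_handlers(handlers):
    handlers.append(ExampleAutotoolsHandler())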
+
+class MakefileRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ makefile = RecipeHandler.checkfiles(srctree, ['Makefile'])
+ if makefile:
+ lines_after.append('# NOTE: this is a Makefile-only piece of software, so we cannot generate much of the')
+ lines_after.append('# recipe automatically - you will need to examine the Makefile yourself and ensure')
+ lines_after.append('# that the appropriate arguments are passed in.')
+ lines_after.append('')
+
+ scanfile = os.path.join(srctree, 'configure.scan')
+ skipscan = False
+ try:
+ stdout, stderr = bb.process.run('autoscan', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ skipscan = True
+ if scanfile and os.path.exists(scanfile):
+ values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, acfile=scanfile)
+ classes.extend(values.pop('inherit', '').split())
+ for var, value in values.iteritems():
+ if var == 'DEPENDS':
+ lines_before.append('# NOTE: some of these dependencies may be optional, check the Makefile and/or upstream documentation')
+ lines_before.append('%s = "%s"' % (var, value))
+ lines_before.append('')
+ for f in ['configure.scan', 'autoscan.log']:
+ fp = os.path.join(srctree, f)
+ if os.path.exists(fp):
+ os.remove(fp)
+
+ self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
+
+ func = []
+ func.append('# You will almost certainly need to add additional arguments here')
+ func.append('oe_runmake')
+ self.genfunction(lines_after, 'do_compile', func)
+
+ installtarget = True
+ try:
+ stdout, stderr = bb.process.run('make -n install', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ if e.exitcode != 1:
+ installtarget = False
+ func = []
+ if installtarget:
+ func.append('# This is a guess; additional arguments may be required')
+ makeargs = ''
+ with open(makefile[0], 'r') as f:
+ for i in range(1, 100):
+ if 'DESTDIR' in f.readline():
+ makeargs += " 'DESTDIR=${D}'"
+ break
+ func.append('oe_runmake install%s' % makeargs)
+ else:
+ func.append('# NOTE: unable to determine what to put here - there is a Makefile but no')
+ func.append('# target named "install", so you will need to define this yourself')
+ self.genfunction(lines_after, 'do_install', func)
+
+ handled.append('buildsystem')
+ else:
+ lines_after.append('# NOTE: no Makefile found, unable to determine what needs to be done')
+ lines_after.append('')
+ self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
+ self.genfunction(lines_after, 'do_compile', ['# Specify compilation commands here'])
+ self.genfunction(lines_after, 'do_install', ['# Specify install commands here'])
+
+
+class VersionFileRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'PV' not in extravalues:
+ # Look for a VERSION or version file containing a single line consisting
+ # only of a version number
+ filelist = RecipeHandler.checkfiles(srctree, ['VERSION', 'version'])
+ version = None
+ for fileitem in filelist:
+ linecount = 0
+ with open(fileitem, 'r') as f:
+ for line in f:
+ line = line.rstrip().strip('"\'')
+ linecount += 1
+ if line:
+ if linecount > 1:
+ version = None
+ break
+ else:
+ if validate_pv(line):
+ version = line
+ if version:
+ extravalues['PV'] = version
+ break
+
+
+class SpecFileRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'PV' in extravalues and 'PN' in extravalues:
+ return
+ filelist = RecipeHandler.checkfiles(srctree, ['*.spec'], recursive=True)
+ pn = None
+ pv = None
+ for fileitem in filelist:
+ linecount = 0
+ with open(fileitem, 'r') as f:
+ for line in f:
+ if line.startswith('Name:') and not pn:
+ pn = line.split(':')[1].strip()
+ if line.startswith('Version:') and not pv:
+ pv = line.split(':')[1].strip()
+ if pv or pn:
+ if pv and not 'PV' in extravalues and validate_pv(pv):
+ extravalues['PV'] = pv
+ if pn and not 'PN' in extravalues:
+ extravalues['PN'] = pn
+ break
+
+def register_recipe_handlers(handlers):
+ # Set priorities with some gaps so that other plugins can insert
+ # their own handlers (so avoid changing these numbers)
+ handlers.append((CmakeRecipeHandler(), 50))
+ handlers.append((AutotoolsRecipeHandler(), 40))
+ handlers.append((SconsRecipeHandler(), 30))
+ handlers.append((QmakeRecipeHandler(), 20))
+ handlers.append((MakefileRecipeHandler(), 10))
+ handlers.append((VersionFileRecipeHandler(), -1))
+ handlers.append((SpecFileRecipeHandler(), -1))
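[Editorial note - not part of the patch] A minimal sketch of how an external plugin could slot its own build-system handler into one of the priority gaps left above, using the (handler, priority) tuple form that create_recipe() accepts; the handler class and marker file name are hypothetical:

# Illustrative only - hypothetical handler and marker file.
from recipetool.create import RecipeHandler

class ExampleBuildsysHandler(RecipeHandler):
    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        if 'buildsystem' in handled:
            return False
        if RecipeHandler.checkfiles(srctree, ['example.build']):
            lines_after.append('# NOTE: add build/install steps for the example build system here')
            lines_after.append('')
            handled.append('buildsystem')
            return True
        return False

def register_recipe_handlers(handlers):
    # 45 slots this in between the cmake (50) and autotools (40) handlers
    handlers.append((ExampleBuildsysHandler(), 45))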
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
new file mode 100644
index 0000000..c382330
--- /dev/null
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -0,0 +1,719 @@
+# Recipe creation tool - create build system handler for python
+#
+# Copyright (C) 2015 Mentor Graphics Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import ast
+import codecs
+import collections
+import distutils.command.build_py
+import email
+import imp
+import glob
+import itertools
+import logging
+import os
+import re
+import sys
+import subprocess
+from recipetool.create import RecipeHandler
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class PythonRecipeHandler(RecipeHandler):
+ base_pkgdeps = ['python-core']
+ excluded_pkgdeps = ['python-dbg']
+ # os.path is provided by python-core
+ assume_provided = ['builtins', 'os.path']
+ # Assumes that the host python builtin_module_names is sane for target too
+ assume_provided = assume_provided + list(sys.builtin_module_names)
+
+ bbvar_map = {
+ 'Name': 'PN',
+ 'Version': 'PV',
+ 'Home-page': 'HOMEPAGE',
+ 'Summary': 'SUMMARY',
+ 'Description': 'DESCRIPTION',
+ 'License': 'LICENSE',
+ 'Requires': 'RDEPENDS_${PN}',
+ 'Provides': 'RPROVIDES_${PN}',
+ 'Obsoletes': 'RREPLACES_${PN}',
+ }
+ # PN/PV are already set by recipetool core & desc can be extremely long
+ excluded_fields = [
+ 'Name',
+ 'Version',
+ 'Description',
+ ]
+ setup_parse_map = {
+ 'Url': 'Home-page',
+ 'Classifiers': 'Classifier',
+ 'Description': 'Summary',
+ }
+ setuparg_map = {
+ 'Home-page': 'url',
+ 'Classifier': 'classifiers',
+ 'Summary': 'description',
+ 'Description': 'long-description',
+ }
+ # Values which are lists, used by the setup.py argument based metadata
+ # extraction method, to determine how to process the setup.py output.
+ setuparg_list_fields = [
+ 'Classifier',
+ 'Requires',
+ 'Provides',
+ 'Obsoletes',
+ 'Platform',
+ 'Supported-Platform',
+ ]
+ setuparg_multi_line_values = ['Description']
+ replacements = [
+ ('License', r' ', '-'),
+ ('License', r'-License$', ''),
+ ('License', r'^UNKNOWN$', ''),
+
+ # Remove currently unhandled version numbers from these variables
+ ('Requires', r' *\([^)]*\)', ''),
+ ('Provides', r' *\([^)]*\)', ''),
+ ('Obsoletes', r' *\([^)]*\)', ''),
+ ('Install-requires', r'^([^><= ]+).*', r'\1'),
+ ('Extras-require', r'^([^><= ]+).*', r'\1'),
+ ('Tests-require', r'^([^><= ]+).*', r'\1'),
+
+ # Remove unhandled dependency on particular features (e.g. foo[PDF])
+ ('Install-requires', r'\[[^\]]+\]$', ''),
+ ]
+
+ classifier_license_map = {
+ 'License :: OSI Approved :: Academic Free License (AFL)': 'AFL',
+ 'License :: OSI Approved :: Apache Software License': 'Apache',
+ 'License :: OSI Approved :: Apple Public Source License': 'APSL',
+ 'License :: OSI Approved :: Artistic License': 'Artistic',
+ 'License :: OSI Approved :: Attribution Assurance License': 'AAL',
+ 'License :: OSI Approved :: BSD License': 'BSD',
+ 'License :: OSI Approved :: Common Public License': 'CPL',
+ 'License :: OSI Approved :: Eiffel Forum License': 'EFL',
+ 'License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)': 'EUPL-1.0',
+ 'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)': 'EUPL-1.1',
+ 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0+',
+ 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0',
+ 'License :: OSI Approved :: GNU Free Documentation License (FDL)': 'GFDL',
+ 'License :: OSI Approved :: GNU General Public License (GPL)': 'GPL',
+ 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0+',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0+',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0+',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0+',
+ 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)': 'LGPL',
+ 'License :: OSI Approved :: IBM Public License': 'IPL',
+ 'License :: OSI Approved :: ISC License (ISCL)': 'ISC',
+ 'License :: OSI Approved :: Intel Open Source License': 'Intel',
+ 'License :: OSI Approved :: Jabber Open Source License': 'Jabber',
+ 'License :: OSI Approved :: MIT License': 'MIT',
+ 'License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)': 'CVWL',
+ 'License :: OSI Approved :: Motosoto License': 'Motosoto',
+ 'License :: OSI Approved :: Mozilla Public License 1.0 (MPL)': 'MPL-1.0',
+ 'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)': 'MPL-1.1',
+ 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)': 'MPL-2.0',
+ 'License :: OSI Approved :: Nethack General Public License': 'NGPL',
+ 'License :: OSI Approved :: Nokia Open Source License': 'Nokia',
+ 'License :: OSI Approved :: Open Group Test Suite License': 'OGTSL',
+ 'License :: OSI Approved :: Python License (CNRI Python License)': 'CNRI-Python',
+ 'License :: OSI Approved :: Python Software Foundation License': 'PSF',
+ 'License :: OSI Approved :: Qt Public License (QPL)': 'QPL',
+ 'License :: OSI Approved :: Ricoh Source Code Public License': 'RSCPL',
+ 'License :: OSI Approved :: Sleepycat License': 'Sleepycat',
+ 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': '-- Sun Industry Standards Source License (SISSL)',
+ 'License :: OSI Approved :: Sun Public License': 'SPL',
+ 'License :: OSI Approved :: University of Illinois/NCSA Open Source License': 'NCSA',
+ 'License :: OSI Approved :: Vovida Software License 1.0': 'VSL-1.0',
+ 'License :: OSI Approved :: W3C License': 'W3C',
+ 'License :: OSI Approved :: X.Net License': 'Xnet',
+ 'License :: OSI Approved :: Zope Public License': 'ZPL',
+ 'License :: OSI Approved :: zlib/libpng License': 'Zlib',
+ }
+
+ def __init__(self):
+ pass
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if not RecipeHandler.checkfiles(srctree, ['setup.py']):
+ return
+
+ # setup.py is always parsed to get at certain required information, such as
+ # distutils vs setuptools
+ #
+ # If egg info is available, we use it for both its PKG-INFO metadata
+ # and for its requires.txt for install_requires.
+ # If PKG-INFO is available but no egg info is, we use that for metadata in preference to
+ # the parsed setup.py, but use the install_requires info from the
+ # parsed setup.py.
+
+ setupscript = os.path.join(srctree, 'setup.py')
+ try:
+ setup_info, uses_setuptools, setup_non_literals, extensions = self.parse_setup_py(setupscript)
+ except Exception:
+ logger.exception("Failed to parse setup.py")
+ setup_info, uses_setuptools, setup_non_literals, extensions = {}, True, [], []
+
+ egginfo = glob.glob(os.path.join(srctree, '*.egg-info'))
+ if egginfo:
+ info = self.get_pkginfo(os.path.join(egginfo[0], 'PKG-INFO'))
+ requires_txt = os.path.join(egginfo[0], 'requires.txt')
+ if os.path.exists(requires_txt):
+ with codecs.open(requires_txt) as f:
+ inst_req = []
+ extras_req = collections.defaultdict(list)
+ current_feature = None
+ for line in f.readlines():
+ line = line.rstrip()
+ if not line:
+ continue
+
+ if line.startswith('['):
+ current_feature = line[1:-1]
+ elif current_feature:
+ extras_req[current_feature].append(line)
+ else:
+ inst_req.append(line)
+ info['Install-requires'] = inst_req
+ info['Extras-require'] = extras_req
+ elif RecipeHandler.checkfiles(srctree, ['PKG-INFO']):
+ info = self.get_pkginfo(os.path.join(srctree, 'PKG-INFO'))
+
+ if setup_info:
+ if 'Install-requires' in setup_info:
+ info['Install-requires'] = setup_info['Install-requires']
+ if 'Extras-require' in setup_info:
+ info['Extras-require'] = setup_info['Extras-require']
+ else:
+ if setup_info:
+ info = setup_info
+ else:
+ info = self.get_setup_args_info(setupscript)
+
+ self.apply_info_replacements(info)
+
+ if uses_setuptools:
+ classes.append('setuptools')
+ else:
+ classes.append('distutils')
+
+ if 'Classifier' in info:
+ licenses = []
+ for classifier in info['Classifier']:
+ if classifier in self.classifier_license_map:
+ license = self.classifier_license_map[classifier]
+ licenses.append(license)
+
+ if licenses:
+ info['License'] = ' & '.join(licenses)
+
+
+ # Map PKG-INFO & setup.py fields to bitbake variables
+ bbinfo = {}
+ for field, values in info.iteritems():
+ if field in self.excluded_fields:
+ continue
+
+ if field not in self.bbvar_map:
+ continue
+
+ if isinstance(values, basestring):
+ value = values
+ else:
+ value = ' '.join(str(v) for v in values if v)
+
+ bbvar = self.bbvar_map[field]
+ if bbvar not in bbinfo and value:
+ bbinfo[bbvar] = value
+
+ comment_lic_line = None
+ for pos, line in enumerate(list(lines_before)):
+ if line.startswith('#') and 'LICENSE' in line:
+ comment_lic_line = pos
+ elif line.startswith('LICENSE =') and 'LICENSE' in bbinfo:
+ if line in ('LICENSE = "Unknown"', 'LICENSE = "CLOSED"'):
+ lines_before[pos] = 'LICENSE = "{}"'.format(bbinfo['LICENSE'])
+ if line == 'LICENSE = "CLOSED"' and comment_lic_line:
+ lines_before[comment_lic_line:pos] = [
+ '# WARNING: the following LICENSE value is a best guess - it is your',
+ '# responsibility to verify that the value is complete and correct.'
+ ]
+ del bbinfo['LICENSE']
+
+ src_uri_line = None
+ for pos, line in enumerate(lines_before):
+ if line.startswith('SRC_URI ='):
+ src_uri_line = pos
+
+ if bbinfo:
+ mdinfo = ['']
+ for k in sorted(bbinfo):
+ v = bbinfo[k]
+ mdinfo.append('{} = "{}"'.format(k, v))
+ if src_uri_line:
+ lines_before[src_uri_line-1:src_uri_line-1] = mdinfo
+ else:
+ lines_before.extend(mdinfo)
+
+ mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
+
+ extras_req = set()
+ if 'Extras-require' in info:
+ extras_req = info['Extras-require']
+ if extras_req:
+ lines_after.append('# The following configs & dependencies are from setuptools extras_require.')
+ lines_after.append('# These dependencies are optional, hence can be controlled via PACKAGECONFIG.')
+ lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('#')
+ lines_after.append('# Uncomment this line to enable all the optional features.')
+ lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req.iterkeys())))
+ for feature, feature_reqs in extras_req.iteritems():
+ unmapped_deps.difference_update(feature_reqs)
+
+ feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
+
+ inst_reqs = set()
+ if 'Install-requires' in info:
+ if extras_req:
+ lines_after.append('')
+ inst_reqs = info['Install-requires']
+ if inst_reqs:
+ unmapped_deps.difference_update(inst_reqs)
+
+ inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
+ lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
+
+ if mapped_deps:
+ name = info.get('Name')
+ if name and name[0] in mapped_deps:
+ # Attempt to avoid self-reference
+ mapped_deps.remove(name[0])
+ mapped_deps -= set(self.excluded_pkgdeps)
+ if inst_reqs or extras_req:
+ lines_after.append('')
+ lines_after.append('# WARNING: the following rdepends are determined through basic analysis of the')
+ lines_after.append('# python sources, and might not be 100% accurate.')
+ lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
+
+ unmapped_deps -= set(extensions)
+ unmapped_deps -= set(self.assume_provided)
+ if unmapped_deps:
+ if mapped_deps:
+ lines_after.append('')
+ lines_after.append('# WARNING: We were unable to map the following python package/module')
+ lines_after.append('# dependencies to the bitbake packages which include them:')
+ lines_after.extend('# {}'.format(d) for d in sorted(unmapped_deps))
+
+ handled.append('buildsystem')
+
+    def get_pkginfo(self, pkginfo_fn):
+        with open(pkginfo_fn, 'r') as f:
+            msg = email.message_from_file(f)
+        msginfo = {}
+        for field in msg.keys():
+            values = msg.get_all(field)
+            if len(values) == 1:
+                msginfo[field] = values[0]
+            else:
+                msginfo[field] = values
+        return msginfo
+
+ def parse_setup_py(self, setupscript='./setup.py'):
+ with codecs.open(setupscript) as f:
+ info, imported_modules, non_literals, extensions = gather_setup_info(f)
+
+ def _map(key):
+ key = key.replace('_', '-')
+ key = key[0].upper() + key[1:]
+ if key in self.setup_parse_map:
+ key = self.setup_parse_map[key]
+ return key
+
+ # Naive mapping of setup() arguments to PKG-INFO field names
+ for d in [info, non_literals]:
+ for key, value in d.items():
+ new_key = _map(key)
+ if new_key != key:
+ del d[key]
+ d[new_key] = value
+
+ return info, 'setuptools' in imported_modules, non_literals, extensions
+
+ def get_setup_args_info(self, setupscript='./setup.py'):
+ cmd = ['python', setupscript]
+ info = {}
+ keys = set(self.bbvar_map.keys())
+ keys |= set(self.setuparg_list_fields)
+ keys |= set(self.setuparg_multi_line_values)
+ grouped_keys = itertools.groupby(keys, lambda k: (k in self.setuparg_list_fields, k in self.setuparg_multi_line_values))
+ for index, keys in grouped_keys:
+ if index == (True, False):
+ # Splitlines output for each arg as a list value
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = [l.rstrip() for l in arg_info.splitlines()]
+ elif index == (False, True):
+ # Entire output for each arg
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = arg_info
+ else:
+ info.update(self.get_setup_byline(list(keys), setupscript))
+ return info
+
+ def get_setup_byline(self, fields, setupscript='./setup.py'):
+ info = {}
+
+ cmd = ['python', setupscript]
+ cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
+ try:
+ info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ if len(fields) != len(info_lines):
+ logger.error('Mismatch between setup.py output lines and number of fields')
+ sys.exit(1)
+
+ for lineno, line in enumerate(info_lines):
+ line = line.rstrip()
+ info[fields[lineno]] = line
+ return info
+
+ def apply_info_replacements(self, info):
+ for variable, search, replace in self.replacements:
+ if variable not in info:
+ continue
+
+ def replace_value(search, replace, value):
+ if replace is None:
+ if re.search(search, value):
+ return None
+ else:
+ new_value = re.sub(search, replace, value)
+ if value != new_value:
+ return new_value
+ return value
+
+ value = info[variable]
+ if isinstance(value, basestring):
+ new_value = replace_value(search, replace, value)
+ if new_value is None:
+ del info[variable]
+ elif new_value != value:
+ info[variable] = new_value
+            elif hasattr(value, 'iteritems'):
+                for dkey, dvalue in value.iteritems():
+                    new_list = []
+                    for pos, a_value in enumerate(dvalue):
+                        new_value = replace_value(search, replace, a_value)
+                        if new_value is not None:
+                            new_list.append(new_value)
+
+                    if dvalue != new_list:
+                        value[dkey] = new_list
+            else:
+                new_list = []
+                for pos, a_value in enumerate(value):
+                    new_value = replace_value(search, replace, a_value)
+                    if new_value is not None:
+                        new_list.append(new_value)
+
+                if value != new_list:
+                    info[variable] = new_list
+
+ def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
+ if 'Package-dir' in setup_info:
+ package_dir = setup_info['Package-dir']
+ else:
+ package_dir = {}
+
+ class PackageDir(distutils.command.build_py.build_py):
+ def __init__(self, package_dir):
+ self.package_dir = package_dir
+
+ pd = PackageDir(package_dir)
+ to_scan = []
+ if not any(v in setup_non_literals for v in ['Py-modules', 'Scripts', 'Packages']):
+ if 'Py-modules' in setup_info:
+ for module in setup_info['Py-modules']:
+ try:
+ package, module = module.rsplit('.', 1)
+ except ValueError:
+ package, module = '.', module
+ module_path = os.path.join(pd.get_package_dir(package), module + '.py')
+ to_scan.append(module_path)
+
+ if 'Packages' in setup_info:
+ for package in setup_info['Packages']:
+ to_scan.append(pd.get_package_dir(package))
+
+ if 'Scripts' in setup_info:
+ to_scan.extend(setup_info['Scripts'])
+ else:
+ logger.info("Scanning the entire source tree, as one or more of the following setup keywords are non-literal: py_modules, scripts, packages.")
+
+ if not to_scan:
+ to_scan = ['.']
+
+ logger.info("Scanning paths for packages & dependencies: %s", ', '.join(to_scan))
+
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ scanned_deps = self.scan_python_dependencies([os.path.join(srctree, p) for p in to_scan])
+ mapped_deps, unmapped_deps = set(self.base_pkgdeps), set()
+ for dep in scanned_deps:
+ mapped = provided_packages.get(dep)
+ if mapped:
+ mapped_deps.add(mapped)
+ else:
+ unmapped_deps.add(dep)
+ return mapped_deps, unmapped_deps
+
+ def scan_python_dependencies(self, paths):
+ deps = set()
+ try:
+ dep_output = self.run_command(['pythondeps', '-d'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ for line in dep_output.splitlines():
+ line = line.rstrip()
+ dep, filename = line.split('\t', 1)
+ if filename.endswith('/setup.py'):
+ continue
+ deps.add(dep)
+
+ try:
+ provides_output = self.run_command(['pythondeps', '-p'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ provides_lines = (l.rstrip() for l in provides_output.splitlines())
+ provides = set(l for l in provides_lines if l and l != 'setup')
+ deps -= provides
+
+ return deps
+
+ def parse_pkgdata_for_python_packages(self):
+ suffixes = [t[0] for t in imp.get_suffixes()]
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+
+ ldata = tinfoil.config_data.createCopy()
+ bb.parse.handle('classes/python-dir.bbclass', ldata, True)
+ python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR', True)
+
+ dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
+ python_dirs = [python_sitedir + os.sep,
+ os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
+ os.path.dirname(python_sitedir) + os.sep]
+ packages = {}
+ for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
+ files_info = None
+ with open(pkgdatafile, 'r') as f:
+ for line in f.readlines():
+ field, value = line.split(': ', 1)
+ if field == 'FILES_INFO':
+ files_info = ast.literal_eval(value)
+ break
+ else:
+ continue
+
+ for fn in files_info.iterkeys():
+ for suffix in suffixes:
+ if fn.endswith(suffix):
+ break
+ else:
+ continue
+
+ if fn.startswith(dynload_dir + os.sep):
+ base = os.path.basename(fn)
+ provided = base.split('.', 1)[0]
+ packages[provided] = os.path.basename(pkgdatafile)
+ continue
+
+ for python_dir in python_dirs:
+ if fn.startswith(python_dir):
+ relpath = fn[len(python_dir):]
+ relstart, _, relremaining = relpath.partition(os.sep)
+ if relstart.endswith('.egg'):
+ relpath = relremaining
+ base, _ = os.path.splitext(relpath)
+
+ if '/.debug/' in base:
+ continue
+ if os.path.basename(base) == '__init__':
+ base = os.path.dirname(base)
+ base = base.replace(os.sep + os.sep, os.sep)
+ provided = base.replace(os.sep, '.')
+ packages[provided] = os.path.basename(pkgdatafile)
+ return packages
+
+    @classmethod
+    def run_command(cls, cmd, **popenargs):
+        if 'stderr' not in popenargs:
+            popenargs['stderr'] = subprocess.STDOUT
+        try:
+            return subprocess.check_output(cmd, **popenargs)
+        except OSError as exc:
+            logger.error('Unable to run `%s`: %s', ' '.join(cmd), exc)
+            raise
+        except subprocess.CalledProcessError as exc:
+            logger.error('Unable to run `%s`: %s', ' '.join(cmd), exc.output)
+            raise
+
+
+def gather_setup_info(fileobj):
+ parsed = ast.parse(fileobj.read(), fileobj.name)
+ visitor = SetupScriptVisitor()
+ visitor.visit(parsed)
+
+ non_literals, extensions = {}, []
+ for key, value in visitor.keywords.items():
+ if key == 'ext_modules':
+ if isinstance(value, list):
+ for ext in value:
+ if (isinstance(ext, ast.Call) and
+ isinstance(ext.func, ast.Name) and
+ ext.func.id == 'Extension' and
+ not has_non_literals(ext.args)):
+ extensions.append(ext.args[0])
+ elif has_non_literals(value):
+ non_literals[key] = value
+ del visitor.keywords[key]
+
+ return visitor.keywords, visitor.imported_modules, non_literals, extensions
+
+
+class SetupScriptVisitor(ast.NodeVisitor):
+ def __init__(self):
+ ast.NodeVisitor.__init__(self)
+ self.keywords = {}
+ self.non_literals = []
+ self.imported_modules = set()
+
+ def visit_Expr(self, node):
+ if isinstance(node.value, ast.Call) and \
+ isinstance(node.value.func, ast.Name) and \
+ node.value.func.id == 'setup':
+ self.visit_setup(node.value)
+
+ def visit_setup(self, node):
+ call = LiteralAstTransform().visit(node)
+ self.keywords = call.keywords
+ for k, v in self.keywords.iteritems():
+ if has_non_literals(v):
+ self.non_literals.append(k)
+
+ def visit_Import(self, node):
+ for alias in node.names:
+ self.imported_modules.add(alias.name)
+
+ def visit_ImportFrom(self, node):
+ self.imported_modules.add(node.module)
+
+
+class LiteralAstTransform(ast.NodeTransformer):
+ """Simplify the ast through evaluation of literals."""
+ excluded_fields = ['ctx']
+
+ def visit(self, node):
+ if not isinstance(node, ast.AST):
+ return node
+ else:
+ return ast.NodeTransformer.visit(self, node)
+
+ def generic_visit(self, node):
+ try:
+ return ast.literal_eval(node)
+ except ValueError:
+ for field, value in ast.iter_fields(node):
+ if field in self.excluded_fields:
+ delattr(node, field)
+ if value is None:
+ continue
+
+ if isinstance(value, list):
+ if field in ('keywords', 'kwargs'):
+ new_value = dict((kw.arg, self.visit(kw.value)) for kw in value)
+ else:
+ new_value = [self.visit(i) for i in value]
+ else:
+ new_value = self.visit(value)
+ setattr(node, field, new_value)
+ return node
+
+    def visit_Name(self, node):
+        if hasattr(__builtins__, node.id):
+            return getattr(__builtins__, node.id)
+        else:
+            return self.generic_visit(node)
+
+ def visit_Tuple(self, node):
+ return tuple(self.visit(v) for v in node.elts)
+
+ def visit_List(self, node):
+ return [self.visit(v) for v in node.elts]
+
+ def visit_Set(self, node):
+ return set(self.visit(v) for v in node.elts)
+
+ def visit_Dict(self, node):
+ keys = (self.visit(k) for k in node.keys)
+ values = (self.visit(v) for v in node.values)
+ return dict(zip(keys, values))
+
+
+def has_non_literals(value):
+ if isinstance(value, ast.AST):
+ return True
+ elif isinstance(value, basestring):
+ return False
+ elif hasattr(value, 'itervalues'):
+ return any(has_non_literals(v) for v in value.itervalues())
+ elif hasattr(value, '__iter__'):
+ return any(has_non_literals(v) for v in value)
+
+
+def register_recipe_handlers(handlers):
+ # We need to make sure this is ahead of the makefile fallback handler
+ handlers.append((PythonRecipeHandler(), 70))
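
The handler above never executes setup.py: gather_setup_info() and LiteralAstTransform walk its AST and keep only the literal setup() arguments. A minimal, self-contained sketch of that idea using just the standard library (the sample setup.py text and the helper name are illustrative, not part of recipetool):

import ast

SAMPLE_SETUP_PY = """
from setuptools import setup
setup(name='demo', version='1.0', install_requires=['requests'])
"""

def literal_setup_kwargs(source):
    """Return the literal keyword arguments passed to setup() in a setup.py source."""
    kwargs = {}
    for node in ast.walk(ast.parse(source)):
        if (isinstance(node, ast.Call) and
                isinstance(node.func, ast.Name) and
                node.func.id == 'setup'):
            for kw in node.keywords:
                try:
                    kwargs[kw.arg] = ast.literal_eval(kw.value)
                except ValueError:
                    # Non-literal argument; the real handler records these separately
                    pass
    return kwargs

print(literal_setup_kwargs(SAMPLE_SETUP_PY))
# e.g. {'name': 'demo', 'version': '1.0', 'install_requires': ['requests']}

The real handler goes further: it records which setup() arguments are non-literal, tracks imported modules to tell setuptools apart from distutils, and collects the names of literal Extension() entries.
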
diff --git a/scripts/lib/recipetool/create_kernel.py b/scripts/lib/recipetool/create_kernel.py
new file mode 100644
index 0000000..c6e86bd
--- /dev/null
+++ b/scripts/lib/recipetool/create_kernel.py
@@ -0,0 +1,99 @@
+# Recipe creation tool - kernel support plugin
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import logging
+from recipetool.create import RecipeHandler, read_pkgconfig_provides, validate_pv
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class KernelRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ import bb.process
+ if 'buildsystem' in handled:
+ return False
+
+ for tell in ['arch', 'firmware', 'Kbuild', 'Kconfig']:
+ if not os.path.exists(os.path.join(srctree, tell)):
+ return False
+
+ handled.append('buildsystem')
+ del lines_after[:]
+ del classes[:]
+ template = os.path.join(tinfoil.config_data.getVar('COREBASE', True), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
+ def handle_var(varname, origvalue, op, newlines):
+ if varname in ['SRCREV', 'SRCREV_machine']:
+ while newlines[-1].startswith('#'):
+ del newlines[-1]
+ try:
+ stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip(), op, 0, True
+ elif varname == 'LINUX_VERSION':
+ makefile = os.path.join(srctree, 'Makefile')
+ if os.path.exists(makefile):
+ kversion = -1
+ kpatchlevel = -1
+ ksublevel = -1
+ kextraversion = ''
+ with open(makefile, 'r') as f:
+ for i, line in enumerate(f):
+ if i > 10:
+ break
+ if line.startswith('VERSION ='):
+ kversion = int(line.split('=')[1].strip())
+ elif line.startswith('PATCHLEVEL ='):
+ kpatchlevel = int(line.split('=')[1].strip())
+ elif line.startswith('SUBLEVEL ='):
+ ksublevel = int(line.split('=')[1].strip())
+ elif line.startswith('EXTRAVERSION ='):
+ kextraversion = line.split('=')[1].strip()
+ version = ''
+ if kversion > -1 and kpatchlevel > -1:
+ version = '%d.%d' % (kversion, kpatchlevel)
+ if ksublevel > -1:
+ version += '.%d' % ksublevel
+ version += kextraversion
+ if version:
+ return version, op, 0, True
+ elif varname == 'SRC_URI':
+ while newlines[-1].startswith('#'):
+ del newlines[-1]
+ elif varname == 'COMPATIBLE_MACHINE':
+ while newlines[-1].startswith('#'):
+ del newlines[-1]
+ machine = tinfoil.config_data.getVar('MACHINE', True)
+ return machine, op, 0, True
+ return origvalue, op, 0, True
+ with open(template, 'r') as f:
+ varlist = ['SRCREV', 'SRCREV_machine', 'SRC_URI', 'LINUX_VERSION', 'COMPATIBLE_MACHINE']
+ (_, newlines) = bb.utils.edit_metadata(f, varlist, handle_var)
+ lines_before[:] = [line.rstrip('\n') for line in newlines]
+
+ return True
+
+def register_recipe_handlers(handlers):
+ handlers.append((KernelRecipeHandler(), 100))
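
The kernel plugin above derives LINUX_VERSION by reading VERSION, PATCHLEVEL, SUBLEVEL and EXTRAVERSION from the first few lines of the kernel's top-level Makefile. A stand-alone sketch of that parsing step with illustrative input (the helper name is made up for the example):

def kernel_version_from_makefile(lines):
    """Build a kernel version string from the top of a kernel Makefile."""
    version = patchlevel = sublevel = -1
    extraversion = ''
    for line in lines[:10]:
        if line.startswith('VERSION ='):
            version = int(line.split('=', 1)[1].strip())
        elif line.startswith('PATCHLEVEL ='):
            patchlevel = int(line.split('=', 1)[1].strip())
        elif line.startswith('SUBLEVEL ='):
            sublevel = int(line.split('=', 1)[1].strip())
        elif line.startswith('EXTRAVERSION ='):
            extraversion = line.split('=', 1)[1].strip()
    result = ''
    if version > -1 and patchlevel > -1:
        result = '%d.%d' % (version, patchlevel)
        if sublevel > -1:
            result += '.%d' % sublevel
        result += extraversion
    return result

print(kernel_version_from_makefile(
    ['VERSION = 4', 'PATCHLEVEL = 4', 'SUBLEVEL = 3', 'EXTRAVERSION = -rc1']))
# 4.4.3-rc1
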
diff --git a/scripts/lib/recipetool/create_kmod.py b/scripts/lib/recipetool/create_kmod.py
new file mode 100644
index 0000000..fe39edb
--- /dev/null
+++ b/scripts/lib/recipetool/create_kmod.py
@@ -0,0 +1,152 @@
+# Recipe creation tool - kernel module support plugin
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import logging
+from recipetool.create import RecipeHandler, read_pkgconfig_provides, validate_pv
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class KernelModuleRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ import bb.process
+ if 'buildsystem' in handled:
+ return False
+
+        module_inc_re = re.compile(r'^#include\s+<linux/module\.h>$')
+        makefiles = []
+        is_module = False
+
+ files = RecipeHandler.checkfiles(srctree, ['*.c', '*.h'], recursive=True)
+ if files:
+ for cfile in files:
+ # Look in same dir or parent for Makefile
+ for makefile in [os.path.join(os.path.dirname(cfile), 'Makefile'), os.path.join(os.path.dirname(os.path.dirname(cfile)), 'Makefile')]:
+ if makefile in makefiles:
+ break
+ else:
+ if os.path.exists(makefile):
+ makefiles.append(makefile)
+ break
+ else:
+ continue
+ with open(cfile, 'r') as f:
+ for line in f:
+ if module_inc_re.match(line.strip()):
+ is_module = True
+ break
+ if is_module:
+ break
+
+ if is_module:
+ classes.append('module')
+ handled.append('buildsystem')
+ # module.bbclass and the classes it inherits do most of the hard
+ # work, but we need to tweak it slightly depending on what the
+ # Makefile does (and there is a range of those)
+ # Check the makefile for the appropriate install target
+ install_lines = []
+ compile_lines = []
+ in_install = False
+ in_compile = False
+ install_target = None
+ with open(makefile, 'r') as f:
+ for line in f:
+ if line.startswith('install:'):
+ if not install_lines:
+ in_install = True
+ install_target = 'install'
+ elif line.startswith('modules_install:'):
+ install_lines = []
+ in_install = True
+ install_target = 'modules_install'
+ elif line.startswith('modules:'):
+ compile_lines = []
+ in_compile = True
+ elif line.startswith(('all:', 'default:')):
+ if not compile_lines:
+ in_compile = True
+ elif line:
+ if line[0] == '\t':
+ if in_install:
+ install_lines.append(line)
+ elif in_compile:
+ compile_lines.append(line)
+ elif ':' in line:
+ in_install = False
+ in_compile = False
+
+            def check_target(lines, install):
+                kdirpath = ''
+                manual_install = False
+                for line in lines:
+                    splitline = line.split()
+                    if not splitline:
+                        continue
+                    if splitline[0] in ['make', 'gmake', '$(MAKE)']:
+                        if '-C' in splitline:
+                            idx = splitline.index('-C') + 1
+                            if idx < len(splitline):
+                                kdirpath = splitline[idx]
+                                break
+                    elif install and splitline[0] == 'install':
+                        if '.ko' in line:
+                            manual_install = True
+                return kdirpath, manual_install
+
+ kdirpath = None
+ manual_install = False
+ if install_lines:
+ kdirpath, manual_install = check_target(install_lines, install=True)
+ if compile_lines and not kdirpath:
+ kdirpath, _ = check_target(compile_lines, install=False)
+
+ if manual_install or not install_lines:
+ lines_after.append('EXTRA_OEMAKE_append_task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
+ elif install_target and install_target != 'modules_install':
+ lines_after.append('MODULES_INSTALL_TARGET = "install"')
+
+ warnmsg = None
+ kdirvar = None
+ if kdirpath:
+ res = re.match(r'\$\(([^$)]+)\)', kdirpath)
+ if res:
+ kdirvar = res.group(1)
+ if kdirvar != 'KERNEL_SRC':
+ lines_after.append('EXTRA_OEMAKE += "%s=${STAGING_KERNEL_DIR}"' % kdirvar)
+ elif kdirpath.startswith('/lib/'):
+ warnmsg = 'Kernel path in install makefile is hardcoded - you will need to patch the makefile'
+ if not kdirvar and not warnmsg:
+ warnmsg = 'Unable to find means of passing kernel path into install makefile - if kernel path is hardcoded you will need to patch the makefile'
+ if warnmsg:
+ warnmsg += '. Note that the variable KERNEL_SRC will be passed in as the kernel source path.'
+ logger.warn(warnmsg)
+ lines_after.append('# %s' % warnmsg)
+
+ return True
+
+ return False
+
+def register_recipe_handlers(handlers):
+ handlers.append((KernelModuleRecipeHandler(), 15))
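
To decide how the kernel source path reaches the module Makefile, the handler above scans the install and compile rules for a make invocation and extracts the variable used after '-C'; only when that variable is not KERNEL_SRC does it add an EXTRA_OEMAKE override. A small self-contained sketch of the extraction (the helper name and sample rule are illustrative):

import re

def kernel_dir_variable(rule_line):
    """Return the make variable passed after '-C' in a module Makefile rule, or None."""
    words = rule_line.split()
    if words and words[0] in ('make', 'gmake', '$(MAKE)') and '-C' in words:
        idx = words.index('-C') + 1
        if idx < len(words):
            match = re.match(r'\$\(([^$)]+)\)', words[idx])
            if match:
                return match.group(1)
    return None

print(kernel_dir_variable('$(MAKE) -C $(KERNEL_SRC) M=$(PWD) modules'))
# KERNEL_SRC
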
diff --git a/scripts/lib/recipetool/create_npm.py b/scripts/lib/recipetool/create_npm.py
new file mode 100644
index 0000000..cc4fb42
--- /dev/null
+++ b/scripts/lib/recipetool/create_npm.py
@@ -0,0 +1,156 @@
+# Recipe creation tool - node.js NPM module support plugin
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import logging
+import subprocess
+import tempfile
+import shutil
+import json
+from recipetool.create import RecipeHandler, split_pkg_licenses
+
+logger = logging.getLogger('recipetool')
+
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class NpmRecipeHandler(RecipeHandler):
+ lockdownpath = None
+
+ def _handle_license(self, data):
+ '''
+ Handle the license value from an npm package.json file
+ '''
+ license = None
+ if 'license' in data:
+ license = data['license']
+ if isinstance(license, dict):
+ license = license.get('type', None)
+ return license
+
+ def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before):
+ try:
+ runenv = dict(os.environ, PATH=tinfoil.config_data.getVar('PATH', True))
+ bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ except bb.process.ExecutionError as e:
+ logger.warn('npm shrinkwrap failed:\n%s' % e.stdout)
+ return
+
+ tmpfile = os.path.join(localfilesdir, 'npm-shrinkwrap.json')
+ shutil.move(os.path.join(srctree, 'npm-shrinkwrap.json'), tmpfile)
+ extravalues.setdefault('extrafiles', {})
+ extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
+ lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
+
+ def _lockdown(self, srctree, localfilesdir, extravalues, lines_before):
+ runenv = dict(os.environ, PATH=tinfoil.config_data.getVar('PATH', True))
+ if not NpmRecipeHandler.lockdownpath:
+ NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
+ bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
+ cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ relockbin = os.path.join(NpmRecipeHandler.lockdownpath, 'node_modules', 'lockdown', 'relock.js')
+ if not os.path.exists(relockbin):
+ logger.warn('Could not find relock.js within lockdown directory; skipping lockdown')
+ return
+ try:
+ bb.process.run('node %s' % relockbin, cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ except bb.process.ExecutionError as e:
+ logger.warn('lockdown-relock failed:\n%s' % e.stdout)
+ return
+
+ tmpfile = os.path.join(localfilesdir, 'lockdown.json')
+ shutil.move(os.path.join(srctree, 'lockdown.json'), tmpfile)
+ extravalues.setdefault('extrafiles', {})
+ extravalues['extrafiles']['lockdown.json'] = tmpfile
+ lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ import bb.utils
+ import oe
+ from collections import OrderedDict
+
+ if 'buildsystem' in handled:
+ return False
+
+ def read_package_json(fn):
+ with open(fn, 'r') as f:
+ return json.loads(f.read())
+
+ files = RecipeHandler.checkfiles(srctree, ['package.json'])
+ if files:
+ data = read_package_json(files[0])
+ if 'name' in data and 'version' in data:
+ extravalues['PN'] = data['name']
+ extravalues['PV'] = data['version']
+ classes.append('npm')
+ handled.append('buildsystem')
+ if 'description' in data:
+ lines_before.append('SUMMARY = "%s"' % data['description'])
+ if 'homepage' in data:
+ lines_before.append('HOMEPAGE = "%s"' % data['homepage'])
+
+ # Shrinkwrap
+ localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
+ self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before)
+
+ # Lockdown
+ self._lockdown(srctree, localfilesdir, extravalues, lines_before)
+
+                # Split each npm module out to its own package
+                npmpackages = oe.package.npm_split_package_dirs(srctree)
+                licvalues = None
+                for item in handled:
+                    if isinstance(item, tuple):
+                        if item[0] == 'license':
+                            licvalues = item[1]
+                            break
+                if licvalues:
+ # Augment the license list with information we have in the packages
+ licenses = {}
+ license = self._handle_license(data)
+ if license:
+ licenses['${PN}'] = license
+ for pkgname, pkgitem in npmpackages.iteritems():
+ _, pdata = pkgitem
+ license = self._handle_license(pdata)
+ if license:
+ licenses[pkgname] = license
+ # Now write out the package-specific license values
+ # We need to strip out the json data dicts for this since split_pkg_licenses
+ # isn't expecting it
+ packages = OrderedDict((x,y[0]) for x,y in npmpackages.iteritems())
+ packages['${PN}'] = ''
+ pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
+ all_licenses = list(set([item for pkglicense in pkglicenses.values() for item in pkglicense]))
+ # Go back and update the LICENSE value since we have a bit more
+ # information than when that was written out (and we know all apply
+ # vs. there being a choice, so we can join them with &)
+ for i, line in enumerate(lines_before):
+ if line.startswith('LICENSE = '):
+ lines_before[i] = 'LICENSE = "%s"' % ' & '.join(all_licenses)
+ break
+
+ return True
+
+ return False
+
+def register_recipe_handlers(handlers):
+ handlers.append((NpmRecipeHandler(), 60))
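
npm's package.json allows the "license" field to be either a plain string or an object such as {"type": "MIT", "url": "..."}, which is what _handle_license() above normalises. A minimal runnable sketch of the same normalisation (the function name and sample JSON are illustrative):

import json

def npm_license(package_json_text):
    """Return the license from package.json content; npm allows a plain string
    or an object with a "type" key."""
    data = json.loads(package_json_text)
    license = data.get('license')
    if isinstance(license, dict):
        license = license.get('type')
    return license

print(npm_license('{"name": "demo", "version": "1.0.0", "license": {"type": "MIT"}}'))
# MIT
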
diff --git a/scripts/lib/recipetool/newappend.py b/scripts/lib/recipetool/newappend.py
new file mode 100644
index 0000000..4fbb40a
--- /dev/null
+++ b/scripts/lib/recipetool/newappend.py
@@ -0,0 +1,112 @@
+# Recipe creation tool - newappend plugin
+#
+# This sub-command creates a bbappend for the specified target and prints the
+# path to the bbappend.
+#
+# Example: recipetool newappend meta-mylayer busybox
+#
+# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import argparse
+import errno
+import logging
+import os
+import re
+import subprocess
+import sys
+import scriptutils
+
+
+logger = logging.getLogger('recipetool')
+tinfoil = None
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+def _provide_to_pn(cooker, provide):
+ """Get the name of the preferred recipe for the specified provide."""
+ import bb.providers
+ filenames = cooker.recipecache.providers[provide]
+ eligible, foundUnique = bb.providers.filterProviders(filenames, provide, cooker.expanded_data, cooker.recipecache)
+ filename = eligible[0]
+ pn = cooker.recipecache.pkg_fn[filename]
+ return pn
+
+
+def _get_recipe_file(cooker, pn):
+ import oe.recipeutils
+ recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
+ if not recipefile:
+ skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
+ if skipreasons:
+ logger.error('\n'.join(skipreasons))
+ else:
+ logger.error("Unable to find any recipe file matching %s" % pn)
+ return recipefile
+
+
+def layer(layerpath):
+ if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
+ raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
+ return layerpath
+
+
+def newappend(args):
+ import oe.recipeutils
+
+ pn = _provide_to_pn(tinfoil.cooker, args.target)
+ recipe_path = _get_recipe_file(tinfoil.cooker, pn)
+
+ rd = tinfoil.config_data.createCopy()
+ rd.setVar('FILE', recipe_path)
+ append_path, path_ok = oe.recipeutils.get_bbappend_path(rd, args.destlayer, args.wildcard_version)
+ if not append_path:
+ logger.error('Unable to determine layer directory containing %s', recipe_path)
+ return 1
+
+ if not path_ok:
+ logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
+
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+ if not os.path.abspath(args.destlayer) in layerdirs:
+ logger.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+
+ if not os.path.exists(append_path):
+ bb.utils.mkdirhier(os.path.dirname(append_path))
+
+ try:
+ open(append_path, 'a').close()
+ except (OSError, IOError) as exc:
+ logger.critical(str(exc))
+ return 1
+
+ if args.edit:
+ return scriptutils.run_editor([append_path, recipe_path])
+ else:
+ print(append_path)
+
+
+def register_commands(subparsers):
+ parser = subparsers.add_parser('newappend',
+ help='Create a bbappend for the specified target in the specified layer')
+ parser.add_argument('-e', '--edit', help='Edit the new append. This obeys $VISUAL if set, otherwise $EDITOR, otherwise vi.', action='store_true')
+ parser.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
+ parser.add_argument('destlayer', help='Base directory of the destination layer to write the bbappend to', type=layer)
+ parser.add_argument('target', help='Target recipe/provide to append')
+ parser.set_defaults(func=newappend, parserecipes=True)
diff --git a/scripts/lib/recipetool/setvar.py b/scripts/lib/recipetool/setvar.py
new file mode 100644
index 0000000..657d2b6
--- /dev/null
+++ b/scripts/lib/recipetool/setvar.py
@@ -0,0 +1,75 @@
+# Recipe creation tool - set variable plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import fnmatch
+import re
+import logging
+import scriptutils
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+plugins = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+def setvar(args):
+ import oe.recipeutils
+
+ if args.delete:
+ if args.value:
+ logger.error('-D/--delete and specifying a value are mutually exclusive')
+ return 1
+ value = None
+ else:
+ if args.value is None:
+ logger.error('You must specify a value if not using -D/--delete')
+ return 1
+ value = args.value
+ varvalues = {args.varname: value}
+
+ if args.recipe_only:
+ patches = [oe.recipeutils.patch_recipe_file(args.recipefile, varvalues, patch=args.patch)]
+ else:
+ rd = oe.recipeutils.parse_recipe(args.recipefile, None, tinfoil.config_data)
+ if not rd:
+ return 1
+ patches = oe.recipeutils.patch_recipe(rd, args.recipefile, varvalues, patch=args.patch)
+ if args.patch:
+ for patch in patches:
+ for line in patch:
+ sys.stdout.write(line)
+ return 0
+
+
+def register_commands(subparsers):
+ parser_setvar = subparsers.add_parser('setvar',
+ help='Set a variable within a recipe',
+ description='Adds/updates the value a variable is set to in a recipe')
+ parser_setvar.add_argument('recipefile', help='Recipe file to update')
+ parser_setvar.add_argument('varname', help='Variable name to set')
+ parser_setvar.add_argument('value', nargs='?', help='New value to set the variable to')
+ parser_setvar.add_argument('--recipe-only', '-r', help='Do not set variable in any include file if present', action='store_true')
+ parser_setvar.add_argument('--patch', '-p', help='Create a patch to make the change instead of modifying the recipe', action='store_true')
+ parser_setvar.add_argument('--delete', '-D', help='Delete the specified value instead of setting it', action='store_true')
+ parser_setvar.set_defaults(func=setvar)
diff --git a/scripts/lib/scriptpath.py b/scripts/lib/scriptpath.py
new file mode 100644
index 0000000..d00317e
--- /dev/null
+++ b/scripts/lib/scriptpath.py
@@ -0,0 +1,42 @@
+# Path utility functions for OE python scripts
+#
+# Copyright (C) 2012-2014 Intel Corporation
+# Copyright (C) 2011 Mentor Graphics Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import os.path
+
+def add_oe_lib_path():
+ basepath = os.path.abspath(os.path.dirname(__file__) + '/../..')
+ newpath = basepath + '/meta/lib'
+ sys.path.insert(0, newpath)
+
+def add_bitbake_lib_path():
+ basepath = os.path.abspath(os.path.dirname(__file__) + '/../..')
+ bitbakepath = None
+ if os.path.exists(basepath + '/bitbake/lib/bb'):
+ bitbakepath = basepath + '/bitbake'
+ else:
+ # look for bitbake/bin dir in PATH
+ for pth in os.environ['PATH'].split(':'):
+ if os.path.exists(os.path.join(pth, '../lib/bb')):
+ bitbakepath = os.path.abspath(os.path.join(pth, '..'))
+ break
+
+ if bitbakepath:
+ sys.path.insert(0, bitbakepath + '/lib')
+ return bitbakepath
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
new file mode 100644
index 0000000..aef19d3
--- /dev/null
+++ b/scripts/lib/scriptutils.py
@@ -0,0 +1,118 @@
+# Script utility functions
+#
+# Copyright (C) 2014 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import logging
+import glob
+import argparse
+import subprocess
+
+def logger_create(name):
+ logger = logging.getLogger(name)
+ loggerhandler = logging.StreamHandler()
+ loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ logger.addHandler(loggerhandler)
+ logger.setLevel(logging.INFO)
+ return logger
+
+def logger_setup_color(logger, color='auto'):
+ from bb.msg import BBLogFormatter
+ console = logging.StreamHandler(sys.stdout)
+ formatter = BBLogFormatter("%(levelname)s: %(message)s")
+ console.setFormatter(formatter)
+ logger.handlers = [console]
+ if color == 'always' or (color=='auto' and console.stream.isatty()):
+ formatter.enable_color()
+
+
+def load_plugins(logger, plugins, pluginpath):
+ import imp
+
+ def load_plugin(name):
+ logger.debug('Loading plugin %s' % name)
+ fp, pathname, description = imp.find_module(name, [pluginpath])
+ try:
+ return imp.load_module(name, fp, pathname, description)
+ finally:
+ if fp:
+ fp.close()
+
+ logger.debug('Loading plugins from %s...' % pluginpath)
+ for fn in glob.glob(os.path.join(pluginpath, '*.py')):
+ name = os.path.splitext(os.path.basename(fn))[0]
+ if name != '__init__':
+ plugin = load_plugin(name)
+ if hasattr(plugin, 'plugin_init'):
+ plugin.plugin_init(plugins)
+ plugins.append(plugin)
+
+def git_convert_standalone_clone(repodir):
+ """If specified directory is a git repository, ensure it's a standalone clone"""
+ import bb.process
+ if os.path.exists(os.path.join(repodir, '.git')):
+ alternatesfile = os.path.join(repodir, '.git', 'objects', 'info', 'alternates')
+ if os.path.exists(alternatesfile):
+ # This will have been cloned with -s, so we need to convert it so none
+ # of the contents is shared
+ bb.process.run('git repack -a', cwd=repodir)
+ os.remove(alternatesfile)
+
+def fetch_uri(d, uri, destdir, srcrev=None):
+ """Fetch a URI to a local directory"""
+ import bb.data
+ bb.utils.mkdirhier(destdir)
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('BB_STRICT_CHECKSUM', '')
+ localdata.setVar('SRCREV', srcrev)
+ ret = (None, None)
+ olddir = os.getcwd()
+ try:
+ fetcher = bb.fetch2.Fetch([uri], localdata)
+ for u in fetcher.ud:
+ ud = fetcher.ud[u]
+ ud.ignore_checksums = True
+ fetcher.download()
+ for u in fetcher.ud:
+ ud = fetcher.ud[u]
+ if ud.localpath.rstrip(os.sep) == localdata.getVar('DL_DIR', True).rstrip(os.sep):
+ raise Exception('Local path is download directory - please check that the URI "%s" is correct' % uri)
+ fetcher.unpack(destdir)
+ for u in fetcher.ud:
+ ud = fetcher.ud[u]
+ if ud.method.recommends_checksum(ud):
+ md5value = bb.utils.md5_file(ud.localpath)
+ sha256value = bb.utils.sha256_file(ud.localpath)
+ ret = (md5value, sha256value)
+ finally:
+ os.chdir(olddir)
+ return ret
+
+def run_editor(fn):
+    if isinstance(fn, basestring):
+        params = '"%s"' % fn
+    else:
+        params = ''
+        for fnitem in fn:
+            params += ' "%s"' % fnitem
+
+    editor = os.getenv('VISUAL', os.getenv('EDITOR', 'vi'))
+    try:
+        return subprocess.check_call('%s %s' % (editor, params), shell=True)
+    except OSError as exc:
+        logging.error("Execution of editor '%s' failed: %s", editor, exc)
+        return 1
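
load_plugins() above imports every *.py file found in the plugin directory with imp.find_module()/imp.load_module() and then calls the module's plugin_init() hook if it defines one. A self-contained sketch of that loading step which writes a throwaway plugin into a temporary directory (all names are illustrative):

import imp
import os
import tempfile

# Create a throwaway plugin directory containing one plugin module.
plugindir = tempfile.mkdtemp()
with open(os.path.join(plugindir, 'hello.py'), 'w') as f:
    f.write("def plugin_init(plugins):\n    print('hello plugin loaded')\n")

# Locate and load the module the same way load_plugins() does.
fp, pathname, description = imp.find_module('hello', [plugindir])
try:
    plugin = imp.load_module('hello', fp, pathname, description)
finally:
    if fp:
        fp.close()

if hasattr(plugin, 'plugin_init'):
    plugin.plugin_init([])   # prints: hello plugin loaded
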
diff --git a/scripts/lib/wic/__init__.py b/scripts/lib/wic/__init__.py
new file mode 100644
index 0000000..63c1d9c
--- /dev/null
+++ b/scripts/lib/wic/__init__.py
@@ -0,0 +1,4 @@
+import os, sys
+
+cur_path = os.path.dirname(__file__) or '.'
+sys.path.insert(0, cur_path + '/3rdparty')
diff --git a/scripts/lib/wic/__version__.py b/scripts/lib/wic/__version__.py
new file mode 100644
index 0000000..5452a46
--- /dev/null
+++ b/scripts/lib/wic/__version__.py
@@ -0,0 +1 @@
+VERSION = "2.00"
diff --git a/scripts/lib/wic/canned-wks/common.wks.inc b/scripts/lib/wic/canned-wks/common.wks.inc
new file mode 100644
index 0000000..5cf2fd1
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/common.wks.inc
@@ -0,0 +1,3 @@
+# This file is included into 3 canned wks files from this directory
+part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
diff --git a/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg b/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
new file mode 100644
index 0000000..a16bd6a
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
@@ -0,0 +1,11 @@
+# This is an example configuration file for syslinux.
+PROMPT 0
+TIMEOUT 10
+
+ALLOWOPTIONS 1
+SERIAL 0 115200
+
+DEFAULT boot
+LABEL boot
+KERNEL /vmlinuz
+APPEND label=boot root=/dev/sda2 rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0
diff --git a/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks b/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks
new file mode 100644
index 0000000..3529e05
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks
@@ -0,0 +1,8 @@
+# short-description: Create a 'pcbios' direct disk image with custom bootloader config
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly dd to boot media. The bootloader configuration source is a user file.
+
+include common.wks.inc
+
+bootloader --configfile="directdisk-bootloader-config.cfg"
+
diff --git a/scripts/lib/wic/canned-wks/directdisk-gpt.wks b/scripts/lib/wic/canned-wks/directdisk-gpt.wks
new file mode 100644
index 0000000..ea01cf3
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/directdisk-gpt.wks
@@ -0,0 +1,10 @@
+# short-description: Create a 'pcbios' direct disk image
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly dd to boot media.
+
+
+part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
+
+bootloader --ptable gpt --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0"
+
diff --git a/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks b/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
new file mode 100644
index 0000000..8a81f8f
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
@@ -0,0 +1,23 @@
+# short-description: Create multi rootfs image using rootfs plugin
+# long-description: Creates a partitioned disk image with two rootfs partitions
+# using rootfs plugin.
+#
+# Partitions can use either
+# - indirect rootfs references to image recipe(s):
+# wic create directdisk-multi-indirect-recipes -e core-image-minimal \
+# --rootfs-dir rootfs1=core-image-minimal
+# --rootfs-dir rootfs2=core-image-minimal-dev
+#
+# - or paths to rootfs directories:
+# wic create directdisk-multi-rootfs \
+# --rootfs-dir rootfs1=tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs/
+# --rootfs-dir rootfs2=tmp/work/qemux86_64-poky-linux/core-image-minimal-dev/1.0-r0/rootfs/
+#
+# - or any combinations of -r and --rootfs command line options
+
+part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+part / --source rootfs --rootfs-dir=rootfs1 --ondisk sda --fstype=ext4 --label platform --align 1024
+part /rescue --source rootfs --rootfs-dir=rootfs2 --ondisk sda --fstype=ext4 --label secondary --align 1024
+
+bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0"
+
diff --git a/scripts/lib/wic/canned-wks/directdisk.wks b/scripts/lib/wic/canned-wks/directdisk.wks
new file mode 100644
index 0000000..6db74a7
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/directdisk.wks
@@ -0,0 +1,8 @@
+# short-description: Create a 'pcbios' direct disk image
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly dd to boot media.
+
+include common.wks.inc
+
+bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0"
+
diff --git a/scripts/lib/wic/canned-wks/mkefidisk.wks b/scripts/lib/wic/canned-wks/mkefidisk.wks
new file mode 100644
index 0000000..696e94e
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/mkefidisk.wks
@@ -0,0 +1,11 @@
+# short-description: Create an EFI disk image
+# long-description: Creates a partitioned EFI disk image that the user
+# can directly dd to boot media.
+
+part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
+
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+
+part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
diff --git a/scripts/lib/wic/canned-wks/mkgummidisk.wks b/scripts/lib/wic/canned-wks/mkgummidisk.wks
new file mode 100644
index 0000000..66a22f6
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/mkgummidisk.wks
@@ -0,0 +1,11 @@
+# short-description: Create an EFI disk image
+# long-description: Creates a partitioned EFI disk image that the user
+# can directly dd to boot media.
+
+part /boot --source bootimg-efi --sourceparams="loader=gummiboot" --ondisk sda --label msdos --active --align 1024
+
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+
+part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
diff --git a/scripts/lib/wic/canned-wks/mkhybridiso.wks b/scripts/lib/wic/canned-wks/mkhybridiso.wks
new file mode 100644
index 0000000..9d34e9b
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/mkhybridiso.wks
@@ -0,0 +1,7 @@
+# short-description: Create a hybrid ISO image
+# long-description: Creates an EFI and legacy bootable hybrid ISO image
+# which can be used on optical media as well as USB media.
+
+part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi,image_name=HYBRID_ISO_IMG" --ondisk cd --label HYBRIDISO --fstype=ext4
+
+bootloader --timeout=15 --append=""
diff --git a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
new file mode 100644
index 0000000..a6518a0
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -0,0 +1,8 @@
+# short-description: Create a qemu machine 'pcbios' direct disk image
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly use to boot a qemu machine.
+
+include common.wks.inc
+
+bootloader --timeout=0 --append="vga=0 uvesafb.mode_option=640x480-32 root=/dev/vda2 rw mem=256M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 rootfstype=ext4 "
+
diff --git a/scripts/lib/wic/canned-wks/sdimage-bootpart.wks b/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
new file mode 100644
index 0000000..7ffd632
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
@@ -0,0 +1,6 @@
+# short-description: Create SD card image with a boot partition
+# long-description: Creates a partitioned SD card image. Boot files
+# are located in the first vfat partition.
+
+part /boot --source bootimg-partition --ondisk mmcblk --fstype=vfat --label boot --active --align 4 --size 16
+part / --source rootfs --ondisk mmcblk --fstype=ext4 --label root --align 4
diff --git a/scripts/lib/wic/conf.py b/scripts/lib/wic/conf.py
new file mode 100644
index 0000000..f7d56d0
--- /dev/null
+++ b/scripts/lib/wic/conf.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+
+from wic.ksparser import KickStart, KickStartError
+from wic import msger
+from wic.utils import misc
+
+
+def get_siteconf():
+ wic_path = os.path.dirname(__file__)
+ eos = wic_path.find('scripts') + len('scripts')
+ scripts_path = wic_path[:eos]
+
+    return scripts_path + "/lib/wic/config/wic.conf"
+
+class ConfigMgr(object):
+ DEFAULTS = {
+ 'common': {
+ "distro_name": "Default Distribution",
+ "plugin_dir": "/usr/lib/wic/plugins"}, # TODO use prefix also?
+ 'create': {
+ "tmpdir": '/var/tmp/wic',
+ "outdir": './wic-output',
+ "release": None,
+ "logfile": None,
+ "name_prefix": None,
+ "name_suffix": None}
+ }
+
+    # make the manager class a singleton
+ _instance = None
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ cls._instance = super(ConfigMgr, cls).__new__(cls, *args, **kwargs)
+
+ return cls._instance
+
+ def __init__(self, ksconf=None, siteconf=None):
+ # reset config options
+ self.reset()
+
+ if not siteconf:
+ siteconf = get_siteconf()
+
+ # initial options from siteconf
+ self._siteconf = siteconf
+
+ if ksconf:
+ self._ksconf = ksconf
+
+ def reset(self):
+ self.__ksconf = None
+ self.__siteconf = None
+ self.create = {}
+
+ # initialize the values with defaults
+ for sec, vals in self.DEFAULTS.iteritems():
+ setattr(self, sec, vals)
+
+ def __set_ksconf(self, ksconf):
+ if not os.path.isfile(ksconf):
+ msger.error('Cannot find ks file: %s' % ksconf)
+
+ self.__ksconf = ksconf
+ self._parse_kickstart(ksconf)
+ def __get_ksconf(self):
+ return self.__ksconf
+ _ksconf = property(__get_ksconf, __set_ksconf)
+
+ def _parse_kickstart(self, ksconf=None):
+ if not ksconf:
+ return
+
+ try:
+ ksobj = KickStart(ksconf)
+ except KickStartError as err:
+ msger.error(str(err))
+
+ self.create['ks'] = ksobj
+ self.create['name'] = os.path.splitext(os.path.basename(ksconf))[0]
+
+ self.create['name'] = misc.build_name(ksconf,
+ self.create['release'],
+ self.create['name_prefix'],
+ self.create['name_suffix'])
+
+configmgr = ConfigMgr()
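
ConfigMgr uses the common __new__-based singleton pattern, so every instantiation hands back the same shared object. A minimal sketch of the pattern in isolation (the class name is illustrative):

class OnlyOne(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(OnlyOne, cls).__new__(cls)
        return cls._instance

first = OnlyOne()
second = OnlyOne()
print(first is second)   # True: both names refer to the single shared instance

Note that __init__() still runs on every call to the class, which is why ConfigMgr.__init__() starts with reset() before re-applying the site and kickstart configuration.
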
diff --git a/scripts/lib/wic/config/wic.conf b/scripts/lib/wic/config/wic.conf
new file mode 100644
index 0000000..a51bcb5
--- /dev/null
+++ b/scripts/lib/wic/config/wic.conf
@@ -0,0 +1,6 @@
+[common]
+; general settings
+distro_name = OpenEmbedded
+
+[create]
+; settings for create subcommand
diff --git a/scripts/lib/wic/creator.py b/scripts/lib/wic/creator.py
new file mode 100644
index 0000000..5231297
--- /dev/null
+++ b/scripts/lib/wic/creator.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os, sys
+from optparse import OptionParser, SUPPRESS_HELP
+
+from wic import msger
+from wic.utils import errors
+from wic.conf import configmgr
+from wic.plugin import pluginmgr
+
+
+class Creator(object):
+ """${name}: create an image
+
+ Usage:
+ ${name} SUBCOMMAND <ksfile> [OPTS]
+
+ ${command_list}
+ ${option_list}
+ """
+
+ name = 'wic create(cr)'
+
+ def __init__(self, *args, **kwargs):
+ self._subcmds = {}
+
+ # get cmds from pluginmgr
+ # mix-in do_subcmd interface
+ for subcmd, klass in pluginmgr.get_plugins('imager').iteritems():
+ if not hasattr(klass, 'do_create'):
+ msger.warning("Unsupported subcmd: %s" % subcmd)
+ continue
+
+ func = getattr(klass, 'do_create')
+ self._subcmds[subcmd] = func
+
+ def get_optparser(self):
+ optparser = OptionParser()
+ optparser.add_option('-d', '--debug', action='store_true',
+ dest='debug',
+ help=SUPPRESS_HELP)
+ optparser.add_option('-v', '--verbose', action='store_true',
+ dest='verbose',
+ help=SUPPRESS_HELP)
+ optparser.add_option('', '--logfile', type='string', dest='logfile',
+ default=None,
+ help='Path of logfile')
+ optparser.add_option('-c', '--config', type='string', dest='config',
+ default=None,
+ help='Specify config file for wic')
+ optparser.add_option('-o', '--outdir', type='string', action='store',
+ dest='outdir', default=None,
+ help='Output directory')
+ optparser.add_option('', '--tmpfs', action='store_true', dest='enabletmpfs',
+                             help='Set up tmpdir as tmpfs to speed up the build;'
+                                  ' experimental feature, use it only if you'
+                                  ' have more than 4G of memory')
+ return optparser
+
+ def postoptparse(self, options):
+ abspath = lambda pth: os.path.abspath(os.path.expanduser(pth))
+
+ if options.verbose:
+ msger.set_loglevel('verbose')
+ if options.debug:
+ msger.set_loglevel('debug')
+
+ if options.logfile:
+ logfile_abs_path = abspath(options.logfile)
+ if os.path.isdir(logfile_abs_path):
+                raise errors.Usage("logfile's path %s should be a file"
+ % options.logfile)
+ if not os.path.exists(os.path.dirname(logfile_abs_path)):
+ os.makedirs(os.path.dirname(logfile_abs_path))
+ msger.set_interactive(False)
+ msger.set_logfile(logfile_abs_path)
+ configmgr.create['logfile'] = options.logfile
+
+ if options.config:
+ configmgr.reset()
+ configmgr._siteconf = options.config
+
+ if options.outdir is not None:
+ configmgr.create['outdir'] = abspath(options.outdir)
+
+ cdir = 'outdir'
+ if os.path.exists(configmgr.create[cdir]) \
+ and not os.path.isdir(configmgr.create[cdir]):
+ msger.error('Invalid directory specified: %s' \
+ % configmgr.create[cdir])
+
+ if options.enabletmpfs:
+ configmgr.create['enabletmpfs'] = options.enabletmpfs
+
+ def main(self, argv=None):
+ if argv is None:
+ argv = sys.argv
+ else:
+ argv = argv[:] # don't modify caller's list
+
+ pname = argv[0]
+ if pname not in self._subcmds:
+ msger.error('Unknown plugin: %s' % pname)
+
+ optparser = self.get_optparser()
+ options, args = optparser.parse_args(argv)
+
+ self.postoptparse(options)
+
+ return self._subcmds[pname](options, *args[1:])
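
Creator.main() treats argv[0] as the name of an 'imager' plugin and dispatches
to the do_create hook collected in __init__ above. A minimal sketch of such a
plugin; the ImagerPlugin base class in wic/pluginbase.py, the plugin name and
the do_create signature are assumptions here, so check the real direct imager
plugin in this series for the authoritative form:

    from wic.pluginbase import ImagerPlugin    # assumed base class location

    class HelloImagerPlugin(ImagerPlugin):
        """Toy imager: 'wic create' dispatches here when argv[0] == 'hello'."""
        name = 'hello'

        @classmethod
        def do_create(cls, opts, *args):
            # 'opts' are the options parsed by Creator.get_optparser();
            # 'args' are the positional arguments following the plugin name.
            print "would create an image in %s" % (opts.outdir or '/var/tmp/wic')
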
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
new file mode 100644
index 0000000..76b93e8
--- /dev/null
+++ b/scripts/lib/wic/engine.py
@@ -0,0 +1,220 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+
+# This module implements the image creation engine used by 'wic' to
+# create images. The engine parses through the OpenEmbedded kickstart
+# (wks) file specified and generates images that can then be directly
+# written onto media.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import os
+import sys
+
+from wic import msger, creator
+from wic.utils import misc
+from wic.plugin import pluginmgr
+from wic.utils.oe import misc
+
+
+def verify_build_env():
+ """
+ Verify that the build environment is sane.
+
+ Returns True if it is, false otherwise
+ """
+ if not os.environ.get("BUILDDIR"):
+ print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
+ sys.exit(1)
+
+ return True
+
+
+CANNED_IMAGE_DIR = "lib/wic/canned-wks" # relative to scripts
+SCRIPTS_CANNED_IMAGE_DIR = "scripts/" + CANNED_IMAGE_DIR
+
+def build_canned_image_list(path):
+ layers_path = misc.get_bitbake_var("BBLAYERS")
+ canned_wks_layer_dirs = []
+
+ if layers_path is not None:
+ for layer_path in layers_path.split():
+ cpath = os.path.join(layer_path, SCRIPTS_CANNED_IMAGE_DIR)
+ canned_wks_layer_dirs.append(cpath)
+
+ cpath = os.path.join(path, CANNED_IMAGE_DIR)
+ canned_wks_layer_dirs.append(cpath)
+
+ return canned_wks_layer_dirs
+
+def find_canned_image(scripts_path, wks_file):
+ """
+ Find a .wks file with the given name in the canned files dir.
+
+    Return None if not found
+ """
+ layers_canned_wks_dir = build_canned_image_list(scripts_path)
+
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname.endswith("~") or fname.endswith("#"):
+ continue
+ if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ fullpath = os.path.join(canned_wks_dir, fname)
+ return fullpath
+ return None
+
+
+def list_canned_images(scripts_path):
+ """
+ List the .wks files in the canned image dir, minus the extension.
+ """
+ layers_canned_wks_dir = build_canned_image_list(scripts_path)
+
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname.endswith("~") or fname.endswith("#"):
+ continue
+ if fname.endswith(".wks"):
+ fullpath = os.path.join(canned_wks_dir, fname)
+ with open(fullpath) as wks:
+ for line in wks:
+ desc = ""
+ idx = line.find("short-description:")
+ if idx != -1:
+ desc = line[idx + len("short-description:"):].strip()
+ break
+ basename = os.path.splitext(fname)[0]
+ print " %s\t\t%s" % (basename.ljust(30), desc)
+
+
+def list_canned_image_help(scripts_path, fullpath):
+ """
+ List the help and params in the specified canned image.
+ """
+ found = False
+ with open(fullpath) as wks:
+ for line in wks:
+ if not found:
+ idx = line.find("long-description:")
+ if idx != -1:
+ print
+ print line[idx + len("long-description:"):].strip()
+ found = True
+ continue
+ if not line.strip():
+ break
+ idx = line.find("#")
+ if idx != -1:
+ print line[idx + len("#:"):].rstrip()
+ else:
+ break
+
+
+def list_source_plugins():
+ """
+ List the available source plugins i.e. plugins available for --source.
+ """
+ plugins = pluginmgr.get_source_plugins()
+
+ for plugin in plugins:
+ print " %s" % plugin
+
+
+def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, scripts_path, image_output_dir,
+ compressor, debug):
+ """Create image
+
+ wks_file - user-defined OE kickstart file
+ rootfs_dir - absolute path to the build's /rootfs dir
+ bootimg_dir - absolute path to the build's boot artifacts directory
+ kernel_dir - absolute path to the build's kernel directory
+ native_sysroot - absolute path to the build's native sysroots dir
+ scripts_path - absolute path to /scripts dir
+ image_output_dir - dirname to create for image
+ compressor - compressor utility to compress the image
+
+    Normally, the values for the build artifacts are determined
+ by 'wic -e' from the output of the 'bitbake -e' command given an
+ image name e.g. 'core-image-minimal' and a given machine set in
+ local.conf. If that's the case, the variables get the following
+ values from the output of 'bitbake -e':
+
+ rootfs_dir: IMAGE_ROOTFS
+ kernel_dir: DEPLOY_DIR_IMAGE
+ native_sysroot: STAGING_DIR_NATIVE
+
+ In the above case, bootimg_dir remains unset and the
+ plugin-specific image creation code is responsible for finding the
+ bootimg artifacts.
+
+    Alternatively, the values can be passed in explicitly, i.e. 'wic -e'
+    is not used and the individual 'wic' options are used to specify
+    these values directly.
+ """
+ try:
+ oe_builddir = os.environ["BUILDDIR"]
+ except KeyError:
+ print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
+ sys.exit(1)
+
+ if debug:
+ msger.set_loglevel('debug')
+
+ crobj = creator.Creator()
+
+ crobj.main(["direct", native_sysroot, kernel_dir, bootimg_dir, rootfs_dir,
+ wks_file, image_output_dir, oe_builddir, compressor or ""])
+
+ print "\nThe image(s) were created using OE kickstart file:\n %s" % wks_file
+
+
+def wic_list(args, scripts_path):
+ """
+ Print the list of images or source plugins.
+ """
+ if len(args) < 1:
+ return False
+
+ if args == ["images"]:
+ list_canned_images(scripts_path)
+ return True
+ elif args == ["source-plugins"]:
+ list_source_plugins()
+ return True
+ elif len(args) == 2 and args[1] == "help":
+ wks_file = args[0]
+ fullpath = find_canned_image(scripts_path, wks_file)
+ if not fullpath:
+ print "No image named %s found, exiting. "\
+ "(Use 'wic list images' to list available images, or "\
+ "specify a fully-qualified OE kickstart (.wks) "\
+ "filename)\n" % wks_file
+ sys.exit(1)
+ list_canned_image_help(scripts_path, fullpath)
+ return True
+
+ return False
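
The wic_create() docstring above maps the artifact directories onto bitbake
variables for the 'wic -e' case. A minimal sketch of that lookup using
get_bitbake_var() (the same helper build_canned_image_list() uses);
'core-image-minimal' is only an example image name, and in the real tool this
resolution presumably happens in the wic front-end script rather than here:

    from wic.utils.oe.misc import get_bitbake_var

    image = "core-image-minimal"
    rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", image)
    kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", image)
    native_sysroot = get_bitbake_var("STAGING_DIR_NATIVE", image)
    bootimg_dir = ""   # left unset; bootimg source plugins locate their own artifacts
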
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
new file mode 100644
index 0000000..405d25a
--- /dev/null
+++ b/scripts/lib/wic/help.py
@@ -0,0 +1,777 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module implements some basic help invocation functions along
+# with the bulk of the help topic text for the OE Core Image Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import subprocess
+import logging
+
+from wic.plugin import pluginmgr, PLUGIN_TYPES
+
+def subcommand_error(args):
+ logging.info("invalid subcommand %s" % args[0])
+
+
+def display_help(subcommand, subcommands):
+ """
+ Display help for subcommand.
+ """
+ if subcommand not in subcommands:
+ return False
+
+ hlp = subcommands.get(subcommand, subcommand_error)[2]
+ if callable(hlp):
+ hlp = hlp()
+ pager = subprocess.Popen('less', stdin=subprocess.PIPE)
+ pager.communicate(hlp)
+
+ return True
+
+
+def wic_help(args, usage_str, subcommands):
+ """
+ Subcommand help dispatcher.
+ """
+ if len(args) == 1 or not display_help(args[1], subcommands):
+ print usage_str
+
+
+def get_wic_plugins_help():
+ """
+ Combine wic_plugins_help with the help for every known
+ source plugin.
+ """
+ result = wic_plugins_help
+ for plugin_type in PLUGIN_TYPES:
+ result += '\n\n%s PLUGINS\n\n' % plugin_type.upper()
+ for name, plugin in pluginmgr.get_plugins(plugin_type).iteritems():
+ result += "\n %s plugin:\n" % name
+ if plugin.__doc__:
+ result += plugin.__doc__
+ else:
+ result += "\n %s is missing docstring\n" % plugin
+ return result
+
+
+def invoke_subcommand(args, parser, main_command_usage, subcommands):
+ """
+ Dispatch to subcommand handler borrowed from combo-layer.
+ Should use argparse, but has to work in 2.6.
+ """
+ if not args:
+ logging.error("No subcommand specified, exiting")
+ parser.print_help()
+ return 1
+ elif args[0] == "help":
+ wic_help(args, main_command_usage, subcommands)
+ elif args[0] not in subcommands:
+ logging.error("Unsupported subcommand %s, exiting\n" % (args[0]))
+ parser.print_help()
+ return 1
+ else:
+ usage = subcommands.get(args[0], subcommand_error)[1]
+ subcommands.get(args[0], subcommand_error)[0](args[1:], usage)
+
+
+##
+# wic help and usage strings
+##
+
+wic_usage = """
+
+ Create a customized OpenEmbedded image
+
+ usage: wic [--version] | [--help] | [COMMAND [ARGS]]
+
+ Current 'wic' commands are:
+ help Show help for command or one of the topics (see below)
+ create Create a new OpenEmbedded image
+ list List available canned images and source plugins
+
+ Help topics:
+ overview wic overview - General overview of wic
+ plugins wic plugins - Overview and API
+ kickstart wic kickstart - wic kickstart reference
+"""
+
+wic_help_usage = """
+
+ usage: wic help <subcommand>
+
+ This command displays detailed help for the specified subcommand.
+"""
+
+wic_create_usage = """
+
+ Create a new OpenEmbedded image
+
+ usage: wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+ [-e | --image-name] [-s, --skip-build-check] [-D, --debug]
+ [-r, --rootfs-dir] [-b, --bootimg-dir]
+ [-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
+
+ This command creates an OpenEmbedded image based on the 'OE kickstart
+ commands' found in the <wks file>.
+
+ The -o option can be used to place the image in a directory with a
+ different name and location.
+
+ See 'wic help create' for more detailed instructions.
+"""
+
+wic_create_help = """
+
+NAME
+ wic create - Create a new OpenEmbedded image
+
+SYNOPSIS
+ wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-e | --image-name] [-s, --skip-build-check] [-D, --debug]
+ [-r, --rootfs-dir] [-b, --bootimg-dir]
+ [-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
+ [-c, --compress-with]
+
+DESCRIPTION
+ This command creates an OpenEmbedded image based on the 'OE
+ kickstart commands' found in the <wks file>.
+
+ In order to do this, wic needs to know the locations of the
+ various build artifacts required to build the image.
+
+ Users can explicitly specify the build artifact locations using
+ the -r, -b, -k, and -n options. See below for details on where
+ the corresponding artifacts are typically found in a normal
+ OpenEmbedded build.
+
+ Alternatively, users can use the -e option to have 'wic' determine
+ those locations for a given image. If the -e option is used, the
+ user needs to have set the appropriate MACHINE variable in
+ local.conf, and have sourced the build environment.
+
+    The -e option is used to specify the name of the image whose
+    artifacts to use, e.g. core-image-sato.
+
+ The -r option is used to specify the path to the /rootfs dir to
+ use as the .wks rootfs source.
+
+ The -b option is used to specify the path to the dir containing
+ the boot artifacts (e.g. /EFI or /syslinux dirs) to use as the
+ .wks bootimg source.
+
+ The -k option is used to specify the path to the dir containing
+ the kernel to use in the .wks bootimg.
+
+ The -n option is used to specify the path to the native sysroot
+ containing the tools to use to build the image.
+
+    The -f option is used to build the rootfs by running "bitbake <image>".
+
+ The -s option is used to skip the build check. The build check is
+ a simple sanity check used to determine whether the user has
+ sourced the build environment so that the -e option can operate
+ correctly. If the user has specified the build artifact locations
+ explicitly, 'wic' assumes the user knows what he or she is doing
+ and skips the build check.
+
+ The -D option is used to display debug information detailing
+ exactly what happens behind the scenes when a create request is
+ fulfilled (or not, as the case may be). It enumerates and
+ displays the command sequence used, and should be included in any
+ bug report describing unexpected results.
+
+ When 'wic -e' is used, the locations for the build artifacts
+ values are determined by 'wic -e' from the output of the 'bitbake
+ -e' command given an image name e.g. 'core-image-minimal' and a
+ given machine set in local.conf. In that case, the image is
+ created as if the following 'bitbake -e' variables were used:
+
+ -r: IMAGE_ROOTFS
+ -k: STAGING_KERNEL_DIR
+ -n: STAGING_DIR_NATIVE
+ -b: empty (plugin-specific handlers must determine this)
+
+ If 'wic -e' is not used, the user needs to select the appropriate
+ value for -b (as well as -r, -k, and -n).
+
+ The -o option can be used to place the image in a directory with a
+ different name and location.
+    The -c option is used to specify the compressor utility used to
+    compress the image. The gzip, bzip2 and xz compressors are
+    supported.
+"""
+
+wic_list_usage = """
+
+ List available OpenEmbedded images and source plugins
+
+ usage: wic list images
+ wic list <image> help
+ wic list source-plugins
+
+ This command enumerates the set of available canned images as well as
+    help for those images. It can also be used to list the available
+    source plugins.
+
+ The first form enumerates all the available 'canned' images.
+
+ The second form lists the detailed help information for a specific
+ 'canned' image.
+
+ The third form enumerates all the available --sources (source
+ plugins).
+
+ See 'wic help list' for more details.
+"""
+
+wic_list_help = """
+
+NAME
+ wic list - List available OpenEmbedded images and source plugins
+
+SYNOPSIS
+ wic list images
+ wic list <image> help
+ wic list source-plugins
+
+DESCRIPTION
+ This command enumerates the set of available canned images as well
+ as help for those images. It also can be used to list available
+    as help for those images. It can also be used to list available
+
+ The first form enumerates all the available 'canned' images.
+ These are actually just the set of .wks files that have been moved
+    into the /scripts/lib/wic/canned-wks directory.
+
+ The second form lists the detailed help information for a specific
+ 'canned' image.
+
+ The third form enumerates all the available --sources (source
+ plugins). The contents of a given partition are driven by code
+ defined in 'source plugins'. Users specify a specific plugin via
+ the --source parameter of the partition .wks command. Normally
+ this is the 'rootfs' plugin but can be any of the more specialized
+ sources listed by the 'list source-plugins' command. Users can
+ also add their own source plugins - see 'wic help plugins' for
+ details.
+"""
+
+wic_plugins_help = """
+
+NAME
+ wic plugins - Overview and API
+
+DESCRIPTION
+ plugins allow wic functionality to be extended and specialized by
+ users. This section documents the plugin interface, which is
+ currently restricted to 'source' plugins.
+
+ 'Source' plugins provide a mechanism to customize various aspects
+ of the image generation process in wic, mainly the contents of
+ partitions.
+
+ Source plugins provide a mechanism for mapping values specified in
+ .wks files using the --source keyword to a particular plugin
+ implementation that populates a corresponding partition.
+
+ A source plugin is created as a subclass of SourcePlugin (see
+ scripts/lib/wic/pluginbase.py) and the plugin file containing it
+ is added to scripts/lib/wic/plugins/source/ to make the plugin
+ implementation available to the wic implementation.
+
+ Source plugins can also be implemented and added by external
+ layers - any plugins found in a scripts/lib/wic/plugins/source/
+ directory in an external layer will also be made available.
+
+ When the wic implementation needs to invoke a partition-specific
+ implementation, it looks for the plugin that has the same name as
+ the --source param given to that partition. For example, if the
+ partition is set up like this:
+
+ part /boot --source bootimg-pcbios ...
+
+ then the methods defined as class members of the plugin having the
+ matching bootimg-pcbios .name class member would be used.
+
+ To be more concrete, here's the plugin definition that would match
+ a '--source bootimg-pcbios' usage, along with an example method
+ that would be called by the wic implementation when it needed to
+ invoke an implementation-specific partition-preparation function:
+
+ class BootimgPcbiosPlugin(SourcePlugin):
+ name = 'bootimg-pcbios'
+
+ @classmethod
+ def do_prepare_partition(self, part, ...)
+
+ If the subclass itself doesn't implement a function, a 'default'
+ version in a superclass will be located and used, which is why all
+ plugins must be derived from SourcePlugin.
+
+ The SourcePlugin class defines the following methods, which is the
+ current set of methods that can be implemented/overridden by
+ --source plugins. Any methods not implemented by a SourcePlugin
+ subclass inherit the implementations present in the SourcePlugin
+ class (see the SourcePlugin source for details):
+
+ do_prepare_partition()
+ Called to do the actual content population for a
+ partition. In other words, it 'prepares' the final partition
+ image which will be incorporated into the disk image.
+
+ do_configure_partition()
+ Called before do_prepare_partition(), typically used to
+ create custom configuration files for a partition, for
+ example syslinux or grub config files.
+
+ do_install_disk()
+ Called after all partitions have been prepared and assembled
+ into a disk image. This provides a hook to allow
+ finalization of a disk image, for example to write an MBR to
+ it.
+
+ do_stage_partition()
+ Special content-staging hook called before
+ do_prepare_partition(), normally empty.
+
+ Typically, a partition will just use the passed-in
+ parameters, for example the unmodified value of bootimg_dir.
+ In some cases however, things may need to be more tailored.
+ As an example, certain files may additionally need to be
+        taken from bootimg_dir + /boot. This hook allows those files
+ to be staged in a customized fashion. Note that
+ get_bitbake_var() allows you to access non-standard
+ variables that you might want to use for these types of
+ situations.
+
+ This scheme is extensible - adding more hooks is a simple matter
+ of adding more plugin methods to SourcePlugin and derived classes.
+ The code that then needs to call the plugin methods uses
+ plugin.get_source_plugin_methods() to find the method(s) needed by
+ the call; this is done by filling up a dict with keys containing
+ the method names of interest - on success, these will be filled in
+ with the actual methods. Please see the implementation for
+ examples and details.
+"""
+
+wic_overview_help = """
+
+NAME
+ wic overview - General overview of wic
+
+DESCRIPTION
+ The 'wic' command generates partitioned images from existing
+ OpenEmbedded build artifacts. Image generation is driven by
+ partitioning commands contained in an 'Openembedded kickstart'
+ (.wks) file (see 'wic help kickstart') specified either directly
+ on the command-line or as one of a selection of canned .wks files
+ (see 'wic list images'). When applied to a given set of build
+ artifacts, the result is an image or set of images that can be
+ directly written onto media and used on a particular system.
+
+ The 'wic' command and the infrastructure it's based on is by
+ definition incomplete - its purpose is to allow the generation of
+ customized images, and as such was designed to be completely
+ extensible via a plugin interface (see 'wic help plugins').
+
+ Background and Motivation
+
+ wic is meant to be a completely independent standalone utility
+ that initially provides easier-to-use and more flexible
+ replacements for a couple bits of existing functionality in
+ oe-core: directdisk.bbclass and mkefidisk.sh. The difference
+ between wic and those examples is that with wic the functionality
+ of those scripts is implemented by a general-purpose partitioning
+    'language' based on Red Hat kickstart syntax.
+
+ The initial motivation and design considerations that lead to the
+ current tool are described exhaustively in Yocto Bug #3847
+ (https://bugzilla.yoctoproject.org/show_bug.cgi?id=3847).
+
+ Implementation and Examples
+
+ wic can be used in two different modes, depending on how much
+ control the user needs in specifying the Openembedded build
+ artifacts that will be used in creating the image: 'raw' and
+ 'cooked'.
+
+ If used in 'raw' mode, artifacts are explicitly specified via
+ command-line arguments (see example below).
+
+ The more easily usable 'cooked' mode uses the current MACHINE
+ setting and a specified image name to automatically locate the
+ artifacts used to create the image.
+
+ OE kickstart files (.wks) can of course be specified directly on
+ the command-line, but the user can also choose from a set of
+ 'canned' .wks files available via the 'wic list images' command
+ (example below).
+
+ In any case, the prerequisite for generating any image is to have
+ the build artifacts already available. The below examples assume
+    the user has already built a 'core-image-minimal' for a specific
+ machine (future versions won't require this redundant step, but
+ for now that's typically how build artifacts get generated).
+
+ The other prerequisite is to source the build environment:
+
+ $ source oe-init-build-env
+
+ To start out with, we'll generate an image from one of the canned
+    .wks files. The following generates a list of available
+ images:
+
+ $ wic list images
+ mkefidisk Create an EFI disk image
+ directdisk Create a 'pcbios' direct disk image
+
+ You can get more information about any of the available images by
+ typing 'wic list xxx help', where 'xxx' is one of the image names:
+
+ $ wic list mkefidisk help
+
+ Creates a partitioned EFI disk image that the user can directly dd
+ to boot media.
+
+ At any time, you can get help on the 'wic' command or any
+ subcommand (currently 'list' and 'create'). For instance, to get
+ the description of 'wic create' command and its parameters:
+
+ $ wic create
+
+ Usage:
+
+ Create a new OpenEmbedded image
+
+ usage: wic create <wks file or image name> [-o <DIRNAME> | ...]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+ [-e | --image-name] [-s, --skip-build-check] [-D, --debug]
+ [-r, --rootfs-dir] [-b, --bootimg-dir] [-k, --kernel-dir]
+ [-n, --native-sysroot] [-f, --build-rootfs]
+
+ This command creates an OpenEmbedded image based on the 'OE
+ kickstart commands' found in the <wks file>.
+
+ The -o option can be used to place the image in a directory
+ with a different name and location.
+
+ See 'wic help create' for more detailed instructions.
+ ...
+
+ As mentioned in the command, you can get even more detailed
+ information by adding 'help' to the above:
+
+ $ wic help create
+
+ So, the easiest way to create an image is to use the -e option
+ with a canned .wks file. To use the -e option, you need to
+ specify the image used to generate the artifacts and you actually
+ need to have the MACHINE used to build them specified in your
+ local.conf (these requirements aren't necessary if you aren't
+    using the -e option).  Below, we generate a directdisk image,
+ pointing the process at the core-image-minimal artifacts for the
+ current MACHINE:
+
+ $ wic create directdisk -e core-image-minimal
+
+ Checking basic build environment...
+ Done.
+
+ Creating image(s)...
+
+ Info: The new image(s) can be found here:
+ /var/tmp/wic/build/directdisk-201309252350-sda.direct
+
+ The following build artifacts were used to create the image(s):
+
+ ROOTFS_DIR: ...
+ BOOTIMG_DIR: ...
+ KERNEL_DIR: ...
+ NATIVE_SYSROOT: ...
+
+ The image(s) were created using OE kickstart file:
+ .../scripts/lib/wic/canned-wks/directdisk.wks
+
+    The output shows the name and location of the image created and,
+    so that you know exactly what was used to generate it, each of the
+    artifacts and the kickstart file that were used.
+
+ Similarly, you can create a 'mkefidisk' image in the same way
+ (notice that this example uses a different machine - because it's
+ using the -e option, you need to change the MACHINE in your
+ local.conf):
+
+ $ wic create mkefidisk -e core-image-minimal
+ Checking basic build environment...
+ Done.
+
+ Creating image(s)...
+
+ Info: The new image(s) can be found here:
+ /var/tmp/wic/build/mkefidisk-201309260027-sda.direct
+
+ ...
+
+ Here's an example that doesn't take the easy way out and manually
+ specifies each build artifact, along with a non-canned .wks file,
+ and also uses the -o option to have wic create the output
+ somewhere other than the default /var/tmp/wic:
+
+ $ wic create ./test.wks -o ./out --rootfs-dir
+ tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs
+ --bootimg-dir tmp/sysroots/qemux86-64/usr/share
+ --kernel-dir tmp/deploy/images/qemux86-64
+ --native-sysroot tmp/sysroots/x86_64-linux
+
+ Creating image(s)...
+
+ Info: The new image(s) can be found here:
+ out/build/test-201507211313-sda.direct
+
+ The following build artifacts were used to create the image(s):
+ ROOTFS_DIR: tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs
+ BOOTIMG_DIR: tmp/sysroots/qemux86-64/usr/share
+ KERNEL_DIR: tmp/deploy/images/qemux86-64
+ NATIVE_SYSROOT: tmp/sysroots/x86_64-linux
+
+ The image(s) were created using OE kickstart file:
+ ./test.wks
+
+    Here are the contents of test.wks:
+
+ part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+ part / --source rootfs --ondisk sda --fstype=ext3 --label platform --align 1024
+
+ bootloader --timeout=0 --append="rootwait rootfstype=ext3 video=vesafb vga=0x318 console=tty0"
+
+
+ Finally, here's an example of the actual partition language
+ commands used to generate the mkefidisk image i.e. these are the
+ contents of the mkefidisk.wks OE kickstart file:
+
+ # short-description: Create an EFI disk image
+ # long-description: Creates a partitioned EFI disk image that the user
+ # can directly dd to boot media.
+
+ part /boot --source bootimg-efi --ondisk sda --fstype=efi --active
+
+ part / --source rootfs --ondisk sda --fstype=ext3 --label platform
+
+ part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+ bootloader --timeout=10 --append="rootwait console=ttyPCH0,115200"
+
+ You can get a complete listing and description of all the
+ kickstart commands available for use in .wks files from 'wic help
+ kickstart'.
+"""
+
+wic_kickstart_help = """
+
+NAME
+ wic kickstart - wic kickstart reference
+
+DESCRIPTION
+ This section provides the definitive reference to the wic
+ kickstart language. It also provides documentation on the list of
+ --source plugins available for use from the 'part' command (see
+ the 'Platform-specific Plugins' section below).
+
+ The current wic implementation supports only the basic kickstart
+ partitioning commands: partition (or part for short) and
+ bootloader.
+
+ The following is a listing of the commands, their syntax, and
+ meanings. The commands are based on the Fedora kickstart
+ documentation but with modifications to reflect wic capabilities.
+
+ http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
+ http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
+
+ Commands
+
+ * 'part' or 'partition'
+
+ This command creates a partition on the system and uses the
+ following syntax:
+
+ part [<mountpoint>]
+
+ The <mountpoint> is where the partition will be mounted and
+        must take one of the following forms:
+
+ /<path>: For example: /, /usr, or /home
+
+ swap: The partition will be used as swap space.
+
+ If a <mountpoint> is not specified the partition will be created
+ but will not be mounted.
+
+ Partitions with a <mountpoint> specified will be automatically mounted.
+ This is achieved by wic adding entries to the fstab during image
+ generation. In order for a valid fstab to be generated one of the
+ --ondrive, --ondisk or --use-uuid partition options must be used for
+ each partition that specifies a mountpoint.
+
+
+ The following are supported 'part' options:
+
+ --size: The minimum partition size. Specify an integer value
+                 such as 500. Multipliers k, M and G can be used. If
+                 no multiplier is given, the size is interpreted in MB.
+ You do not need this option if you use --source.
+
+ --source: This option is a wic-specific option that names the
+ source of the data that will populate the
+ partition. The most common value for this option
+ is 'rootfs', but can be any value which maps to a
+ valid 'source plugin' (see 'wic help plugins').
+
+ If '--source rootfs' is used, it tells the wic
+ command to create a partition as large as needed
+ and to fill it with the contents of the root
+ filesystem pointed to by the '-r' wic command-line
+ option (or the equivalent rootfs derived from the
+ '-e' command-line option). The filesystem type
+ that will be used to create the partition is driven
+ by the value of the --fstype option specified for
+ the partition (see --fstype below).
+
+                   If '--source <plugin-name>' is used, it tells the
+ wic command to create a partition as large as
+                   needed and to fill it with the contents of the
+ partition that will be generated by the specified
+ plugin name using the data pointed to by the '-r'
+ wic command-line option (or the equivalent rootfs
+ derived from the '-e' command-line option).
+                   Exactly what those contents and the filesystem type
+                   end up being depends on the given plugin
+                   implementation.
+
+                   If the --source option is not used, the wic command
+                   will create an empty partition, so the --size
+                   parameter has to be used to specify the size of the
+                   empty partition.
+
+ --ondisk or --ondrive: Forces the partition to be created on
+ a particular disk.
+
+    --fstype: Sets the file system type for the partition. This
+              applies to partitions created using '--source rootfs'
+              (see --source above). Valid values are:
+
+ ext2
+ ext3
+ ext4
+ btrfs
+ squashfs
+ swap
+
+ --fsoptions: Specifies a free-form string of options to be
+ used when mounting the filesystem. This string
+ will be copied into the /etc/fstab file of the
+ installed system and should be enclosed in
+ quotes. If not specified, the default string is
+ "defaults".
+
+ --label label: Specifies the label to give to the filesystem
+ to be made on the partition. If the given
+ label is already in use by another filesystem,
+ a new label is created for the partition.
+
+ --active: Marks the partition as active.
+
+ --align (in KBytes): This option is specific to wic and says
+ to start a partition on an x KBytes
+ boundary.
+
+ --no-table: This option is specific to wic. Space will be
+ reserved for the partition and it will be
+ populated but it will not be added to the
+ partition table. It may be useful for
+ bootloaders.
+
+ --extra-space: This option is specific to wic. It adds extra
+ space after the space filled by the content
+ of the partition. The final size can go
+ beyond the size specified by --size.
+                   The default is 10MB.
+
+ --overhead-factor: This option is specific to wic. The
+ size of the partition is multiplied by
+ this factor. It has to be greater than or
+ equal to 1.
+ The default value is 1.3.
+
+ --part-type: This option is specific to wic. It specifies partition
+ type GUID for GPT partitions.
+ List of partition type GUIDS can be found here:
+ http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
+
+    --use-uuid: This option is specific to wic. It makes wic generate a
+                random globally unique identifier (GUID) for the partition
+                and use it in the bootloader configuration to specify the
+                root partition.
+
+    --uuid: This option is specific to wic. It specifies the partition UUID.
+            It's useful if a preconfigured partition UUID is added to the kernel
+            command line in the bootloader configuration before running wic. In
+            this case the .wks file can be generated or modified to set the
+            preconfigured partition UUID using this option.
+
+ * bootloader
+
+ This command allows the user to specify various bootloader
+ options. The following are supported 'bootloader' options:
+
+ --timeout: Specifies the number of seconds before the
+ bootloader times out and boots the default option.
+
+ --append: Specifies kernel parameters. These will be added to
+ bootloader command-line - for example, the syslinux
+ APPEND or grub kernel command line.
+
+    --configfile: Specifies a user-defined configuration file for
+                  the bootloader. The value must be either the name
+                  of a file located in the canned-wks folder or the
+                  full path to the file. Using this option will
+                  override any other bootloader option.
+
+ Note that bootloader functionality and boot partitions are
+ implemented by the various --source plugins that implement
+ bootloader functionality; the bootloader command essentially
+ provides a means of modifying bootloader configuration.
+
+ * include
+
+    This command allows the user to include the content of another
+    .wks file in the original .wks file.
+
+ Command uses the following syntax:
+
+ include <file>
+
+    The <file> is either a path to the file or its name. If only a
+    name is specified, wic will try to find the file in the
+    directories containing the canned .wks files.
+
+"""
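
To make the plugin interface described in the 'wic plugins' help text above
concrete, here is a minimal sketch of a custom --source plugin. The
SourcePlugin base class comes from scripts/lib/wic/pluginbase.py and exec_cmd()
from wic/utils/oe/misc.py elsewhere in this series; the plugin name, the vfat
example and the exact hook signatures are assumptions, so check pluginbase.py
and the partition handling code for the authoritative parameter lists:

    from wic import msger
    from wic.pluginbase import SourcePlugin
    from wic.utils.oe.misc import exec_cmd

    class EmptyVfatPlugin(SourcePlugin):
        """Populate a partition with an empty 16MB vfat filesystem."""
        name = 'empty-vfat'    # matched against 'part ... --source empty-vfat'

        @classmethod
        def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                                   oe_builddir, bootimg_dir, kernel_dir,
                                   native_sysroot):
            msger.debug("nothing to configure for %s" % cls.name)

        @classmethod
        def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                                 oe_builddir, bootimg_dir, kernel_dir,
                                 rootfs_dir, native_sysroot):
            img = "%s/%s.img" % (cr_workdir, cls.name)
            exec_cmd("dd if=/dev/zero of=%s bs=1M count=16" % img)
            exec_cmd("mkfs.vfat %s" % img)
            part.size = 16 * 1024      # wic keeps partition sizes in KiB
            part.source_file = img     # picked up by the direct imager's add_partition()
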
diff --git a/scripts/lib/wic/imager/__init__.py b/scripts/lib/wic/imager/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/wic/imager/__init__.py
diff --git a/scripts/lib/wic/imager/baseimager.py b/scripts/lib/wic/imager/baseimager.py
new file mode 100644
index 0000000..760cf8a
--- /dev/null
+++ b/scripts/lib/wic/imager/baseimager.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2007 Red Hat Inc.
+# Copyright (c) 2009, 2010, 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+from __future__ import with_statement
+import os
+import tempfile
+import shutil
+
+from wic import msger
+from wic.utils.errors import CreatorError
+from wic.utils import runner
+
+class BaseImageCreator(object):
+ """Base class for image creation.
+
+ BaseImageCreator is the simplest creator class available; it will
+ create a system image according to the supplied kickstart file.
+
+ e.g.
+
+ import wic.imgcreate as imgcreate
+ ks = imgcreate.read_kickstart("foo.ks")
+ imgcreate.ImageCreator(ks, "foo").create()
+ """
+
+ def __del__(self):
+ self.cleanup()
+
+ def __init__(self, createopts=None):
+ """Initialize an ImageCreator instance.
+
+ ks -- a pykickstart.KickstartParser instance; this instance will be
+ used to drive the install by e.g. providing the list of packages
+ to be installed, the system configuration and %post scripts
+
+ name -- a name for the image; used for e.g. image filenames or
+ filesystem labels
+ """
+
+ self.__builddir = None
+
+ self.ks = None
+ self.name = "target"
+ self.tmpdir = "/var/tmp/wic"
+ self.workdir = "/var/tmp/wic/build"
+
+ # setup tmpfs tmpdir when enabletmpfs is True
+ self.enabletmpfs = False
+
+ if createopts:
+ # Mapping table for variables that have different names.
+ optmap = {"outdir" : "destdir",
+ }
+
+ # update setting from createopts
+ for key in createopts.keys():
+ if key in optmap:
+ option = optmap[key]
+ else:
+ option = key
+ setattr(self, option, createopts[key])
+
+ self.destdir = os.path.abspath(os.path.expanduser(self.destdir))
+
+ self._dep_checks = ["ls", "bash", "cp", "echo"]
+
+ # Output image file names
+ self.outimage = []
+
+ # No ks provided when called by convertor, so skip the dependency check
+ if self.ks:
+ # If we have btrfs partition we need to check necessary tools
+ for part in self.ks.partitions:
+ if part.fstype and part.fstype == "btrfs":
+ self._dep_checks.append("mkfs.btrfs")
+ break
+
+ # make sure the specified tmpdir and cachedir exist
+ if not os.path.exists(self.tmpdir):
+ os.makedirs(self.tmpdir)
+
+
+ #
+ # Hooks for subclasses
+ #
+ def _create(self):
+ """Create partitions for the disk image(s)
+
+ This is the hook where subclasses may create the partitions
+ that will be assembled into disk image(s).
+
+ There is no default implementation.
+ """
+ pass
+
+ def _cleanup(self):
+ """Undo anything performed in _create().
+
+ This is the hook where subclasses must undo anything which was
+ done in _create().
+
+ There is no default implementation.
+
+ """
+ pass
+
+ #
+ # Actual implementation
+ #
+ def __ensure_builddir(self):
+ if not self.__builddir is None:
+ return
+
+ try:
+ self.workdir = os.path.join(self.tmpdir, "build")
+ if not os.path.exists(self.workdir):
+ os.makedirs(self.workdir)
+ self.__builddir = tempfile.mkdtemp(dir=self.workdir,
+ prefix="imgcreate-")
+ except OSError as err:
+            raise CreatorError("Failed to create build directory in %s: %s" %
+ (self.tmpdir, err))
+
+ def __setup_tmpdir(self):
+ if not self.enabletmpfs:
+ return
+
+ runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir)
+
+ def __clean_tmpdir(self):
+ if not self.enabletmpfs:
+ return
+
+ runner.show('umount -l %s' % self.workdir)
+
+ def create(self):
+ """Create partitions for the disk image(s)
+
+ Create the partitions that will be assembled into disk
+ image(s).
+ """
+ self.__setup_tmpdir()
+ self.__ensure_builddir()
+
+ self._create()
+
+ def cleanup(self):
+ """Undo anything performed in create().
+
+ Note, make sure to call this method once finished with the creator
+ instance in order to ensure no stale files are left on the host e.g.:
+
+ creator = ImageCreator(ks, name)
+ try:
+ creator.create()
+ finally:
+ creator.cleanup()
+
+ """
+ if not self.__builddir:
+ return
+
+ self._cleanup()
+
+ shutil.rmtree(self.__builddir, ignore_errors=True)
+ self.__builddir = None
+
+ self.__clean_tmpdir()
+
+
+ def print_outimage_info(self):
+ msg = "The new image can be found here:\n"
+ self.outimage.sort()
+ for path in self.outimage:
+ msg += ' %s\n' % os.path.abspath(path)
+
+ msger.info(msg)
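
BaseImageCreator leaves the real work to the _create()/_cleanup() hooks while
create()/cleanup() handle the tmpfs and build-directory bookkeeping. A minimal
sketch of the subclass contract; the class name and output path below are
hypothetical:

    import os
    from wic.imager.baseimager import BaseImageCreator

    class NoopImageCreator(BaseImageCreator):
        def _create(self):
            # by the time this hook runs, create() has prepared self.workdir
            open(os.path.join(self.workdir, "placeholder.img"), "w").close()

        def _cleanup(self):
            pass   # nothing extra to undo; cleanup() removes the builddir itself

    creator = NoopImageCreator({'outdir': '/tmp/wic-demo'})
    try:
        creator.create()
    finally:
        creator.cleanup()
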
diff --git a/scripts/lib/wic/imager/direct.py b/scripts/lib/wic/imager/direct.py
new file mode 100644
index 0000000..a1b4249
--- /dev/null
+++ b/scripts/lib/wic/imager/direct.py
@@ -0,0 +1,380 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'direct' image creator class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import os
+import shutil
+
+from wic import msger
+from wic.utils import fs_related
+from wic.utils.oe.misc import get_bitbake_var
+from wic.utils.partitionedfs import Image
+from wic.utils.errors import CreatorError, ImageError
+from wic.imager.baseimager import BaseImageCreator
+from wic.plugin import pluginmgr
+from wic.utils.oe.misc import exec_cmd
+
+disk_methods = {
+ "do_install_disk":None,
+}
+
+class DirectImageCreator(BaseImageCreator):
+ """
+ Installs a system into a file containing a partitioned disk image.
+
+ DirectImageCreator is an advanced ImageCreator subclass; an image
+ file is formatted with a partition table, each partition created
+ from a rootfs or other OpenEmbedded build artifact and dd'ed into
+ the virtual disk. The disk image can subsequently be dd'ed onto
+ media and used on actual hardware.
+ """
+
+ def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir,
+ kernel_dir, native_sysroot, compressor, creatoropts=None):
+ """
+ Initialize a DirectImageCreator instance.
+
+ This method takes the same arguments as ImageCreator.__init__()
+ """
+ BaseImageCreator.__init__(self, creatoropts)
+
+ self.__image = None
+ self.__disks = {}
+ self.__disk_format = "direct"
+ self._disk_names = []
+ self.ptable_format = self.ks.bootloader.ptable
+
+ self.oe_builddir = oe_builddir
+ if image_output_dir:
+ self.tmpdir = image_output_dir
+ self.rootfs_dir = rootfs_dir
+ self.bootimg_dir = bootimg_dir
+ self.kernel_dir = kernel_dir
+ self.native_sysroot = native_sysroot
+ self.compressor = compressor
+
+ def __get_part_num(self, num, parts):
+ """calculate the real partition number, accounting for partitions not
+ in the partition table and logical partitions
+ """
+ realnum = 0
+ for pnum, part in enumerate(parts, 1):
+ if not part.no_table:
+ realnum += 1
+ if pnum == num:
+ if part.no_table:
+ return 0
+ if self.ptable_format == 'msdos' and realnum > 3:
+ # account for logical partition numbering, ex. sda5..
+ return realnum + 1
+ return realnum
+
+ def _write_fstab(self, image_rootfs):
+        """Overridden to generate fstab (temporarily) in rootfs. This is called
+ from _create, make sure it doesn't get called from
+ BaseImage.create()
+ """
+ if not image_rootfs:
+ return
+
+ fstab_path = image_rootfs + "/etc/fstab"
+ if not os.path.isfile(fstab_path):
+ return
+
+ with open(fstab_path) as fstab:
+ fstab_lines = fstab.readlines()
+
+ if self._update_fstab(fstab_lines, self._get_parts()):
+ shutil.copyfile(fstab_path, fstab_path + ".orig")
+
+ with open(fstab_path, "w") as fstab:
+ fstab.writelines(fstab_lines)
+
+ return fstab_path
+
+ def _update_fstab(self, fstab_lines, parts):
+ """Assume partition order same as in wks"""
+ updated = False
+ for num, part in enumerate(parts, 1):
+ pnum = self.__get_part_num(num, parts)
+ if not pnum or not part.mountpoint \
+ or part.mountpoint in ("/", "/boot"):
+ continue
+
+ # mmc device partitions are named mmcblk0p1, mmcblk0p2..
+ prefix = 'p' if part.disk.startswith('mmcblk') else ''
+ device_name = "/dev/%s%s%d" % (part.disk, prefix, pnum)
+
+ opts = part.fsopts if part.fsopts else "defaults"
+ line = "\t".join([device_name, part.mountpoint, part.fstype,
+ opts, "0", "0"]) + "\n"
+
+ fstab_lines.append(line)
+ updated = True
+
+ return updated
+
+ def set_bootimg_dir(self, bootimg_dir):
+ """
+ Accessor for bootimg_dir, the actual location used for the source
+ of the bootimg. Should be set by source plugins (only if they
+ change the default bootimg source) so the correct info gets
+ displayed for print_outimage_info().
+ """
+ self.bootimg_dir = bootimg_dir
+
+ def _get_parts(self):
+ if not self.ks:
+ raise CreatorError("Failed to get partition info, "
+ "please check your kickstart setting.")
+
+ # Set a default partition if no partition is given out
+ if not self.ks.partitions:
+ partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
+ args = partstr.split()
+ part = self.ks.parse(args[1:])
+ if part not in self.ks.partitions:
+ self.ks.partitions.append(part)
+
+ # partitions list from kickstart file
+ return self.ks.partitions
+
+ def get_disk_names(self):
+ """ Returns a list of physical target disk names (e.g., 'sdb') which
+ will be created. """
+
+ if self._disk_names:
+ return self._disk_names
+
+ #get partition info from ks handler
+ parts = self._get_parts()
+
+ for i in range(len(parts)):
+ if parts[i].disk:
+ disk_name = parts[i].disk
+ else:
+ raise CreatorError("Failed to create disks, no --ondisk "
+ "specified in partition line of ks file")
+
+ if parts[i].mountpoint and not parts[i].fstype:
+ raise CreatorError("Failed to create disks, no --fstype "
+ "specified for partition with mountpoint "
+                                   "'%s' in the ks file" % parts[i].mountpoint)
+
+ self._disk_names.append(disk_name)
+
+ return self._disk_names
+
+    def _full_name(self, name, extension):
+        """ Construct full file name for a file we generate. """
+        return "%s-%s.%s" % (self.name, name, extension)
+
+    def _full_path(self, path, name, extension):
+        """ Construct full file path to a file we generate. """
+        return os.path.join(path, self._full_name(name, extension))
+
+ def get_default_source_plugin(self):
+ """
+ The default source plugin i.e. the plugin that's consulted for
+ overall image generation tasks outside of any particular
+ partition. For convenience, we just hang it off the
+ bootloader handler since it's the one non-partition object in
+ any setup. By default the default plugin is set to the same
+ plugin as the /boot partition; since we hang it off the
+ bootloader object, the default can be explicitly set using the
+ --source bootloader param.
+ """
+ return self.ks.bootloader.source
+
+ #
+    # Actual implementation
+ #
+ def _create(self):
+ """
+ For 'wic', we already have our build artifacts - we just create
+ filesystems from the artifacts directly and combine them into
+ a partitioned image.
+ """
+ parts = self._get_parts()
+
+ self.__image = Image(self.native_sysroot)
+
+ for part in parts:
+ # as a convenience, set source to the boot partition source
+ # instead of forcing it to be set via bootloader --source
+ if not self.ks.bootloader.source and part.mountpoint == "/boot":
+ self.ks.bootloader.source = part.source
+
+ fstab_path = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
+
+ shutil.rmtree(self.workdir)
+ os.mkdir(self.workdir)
+
+ for part in parts:
+ # get rootfs size from bitbake variable if it's not set in .ks file
+ if not part.size:
+ # and if rootfs name is specified for the partition
+ image_name = part.rootfs_dir
+ if image_name:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
+ if rsize_bb:
+ part.size = int(round(float(rsize_bb)))
+ # need to create the filesystems in order to get their
+ # sizes before we can add them and do the layout.
+ # Image.create() actually calls __format_disks() to create
+ # the disk images and carve out the partitions, then
+ # self.assemble() calls Image.assemble() which calls
+            # __write_partition() for each partition to dd the fs
+ # into the partitions.
+ part.prepare(self, self.workdir, self.oe_builddir, self.rootfs_dir,
+ self.bootimg_dir, self.kernel_dir, self.native_sysroot)
+
+
+ self.__image.add_partition(int(part.size),
+ part.disk,
+ part.mountpoint,
+ part.source_file,
+ part.fstype,
+ part.label,
+ fsopts=part.fsopts,
+ boot=part.active,
+ align=part.align,
+ no_table=part.no_table,
+ part_type=part.part_type,
+ uuid=part.uuid)
+
+ if fstab_path:
+ shutil.move(fstab_path + ".orig", fstab_path)
+
+ self.__image.layout_partitions(self.ptable_format)
+
+ self.__imgdir = self.workdir
+ for disk_name, disk in self.__image.disks.items():
+ full_path = self._full_path(self.__imgdir, disk_name, "direct")
+ msger.debug("Adding disk %s as %s with size %s bytes" \
+ % (disk_name, full_path, disk['min_size']))
+ disk_obj = fs_related.DiskImage(full_path, disk['min_size'])
+ self.__disks[disk_name] = disk_obj
+ self.__image.add_disk(disk_name, disk_obj)
+
+ self.__image.create()
+
+ def assemble(self):
+ """
+ Assemble partitions into disk image(s)
+ """
+ for disk_name, disk in self.__image.disks.items():
+ full_path = self._full_path(self.__imgdir, disk_name, "direct")
+ msger.debug("Assembling disk %s as %s with size %s bytes" \
+ % (disk_name, full_path, disk['min_size']))
+ self.__image.assemble(full_path)
+
+ def finalize(self):
+ """
+ Finalize the disk image.
+
+ For example, prepare the image to be bootable by e.g.
+ creating and installing a bootloader configuration.
+
+ """
+ source_plugin = self.get_default_source_plugin()
+ if source_plugin:
+ self._source_methods = pluginmgr.get_source_plugin_methods(source_plugin, disk_methods)
+ for disk_name, disk in self.__image.disks.items():
+ self._source_methods["do_install_disk"](disk, disk_name, self,
+ self.workdir,
+ self.oe_builddir,
+ self.bootimg_dir,
+ self.kernel_dir,
+ self.native_sysroot)
+ # Compress the image
+ if self.compressor:
+ for disk_name, disk in self.__image.disks.items():
+ full_path = self._full_path(self.__imgdir, disk_name, "direct")
+ msger.debug("Compressing disk %s with %s" % \
+ (disk_name, self.compressor))
+ exec_cmd("%s %s" % (self.compressor, full_path))
+
+ def print_outimage_info(self):
+ """
+ Print the image(s) and artifacts used, for the user.
+ """
+ msg = "The new image(s) can be found here:\n"
+
+ parts = self._get_parts()
+
+ for disk_name in self.__image.disks:
+ extension = "direct" + {"gzip": ".gz",
+ "bzip2": ".bz2",
+ "xz": ".xz",
+ "": ""}.get(self.compressor)
+ full_path = self._full_path(self.__imgdir, disk_name, extension)
+ msg += ' %s\n\n' % full_path
+
+ msg += 'The following build artifacts were used to create the image(s):\n'
+ for part in parts:
+ if part.rootfs_dir is None:
+ continue
+ if part.mountpoint == '/':
+ suffix = ':'
+ else:
+ suffix = '["%s"]:' % (part.mountpoint or part.label)
+ msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), part.rootfs_dir)
+
+ msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
+ msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
+ msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
+
+ msger.info(msg)
+
+ @property
+ def rootdev(self):
+ """
+ Get root device name to use as a 'root' parameter
+ in kernel command line.
+
+ Assume partition order same as in wks
+ """
+ parts = self._get_parts()
+ for num, part in enumerate(parts, 1):
+ if part.mountpoint == "/":
+ if part.uuid:
+ return "PARTUUID=%s" % part.uuid
+ else:
+ suffix = 'p' if part.disk.startswith('mmcblk') else ''
+ pnum = self.__get_part_num(num, parts)
+ return "/dev/%s%s%-d" % (part.disk, suffix, pnum)
+
+ def _cleanup(self):
+ if not self.__image is None:
+ try:
+ self.__image.cleanup()
+            except ImageError as err:
+ msger.warning("%s" % err)
+
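
The partition numbering done by __get_part_num() above is easiest to see with a
worked example. For a hypothetical msdos layout with five table partitions on
sda, the fourth and later partitions shift by one device number because slot 4
is taken by the extended partition that holds the logical partitions; a
partition declared with --no-table is skipped entirely (number 0) and never
reaches the generated fstab or the kernel root= parameter:

    .wks order    counted (realnum)    device node
    1st part      1                    /dev/sda1
    2nd part      2                    /dev/sda2
    3rd part      3                    /dev/sda3
    4th part      4  -> 4 + 1          /dev/sda5
    5th part      5  -> 5 + 1          /dev/sda6
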
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
new file mode 100644
index 0000000..8c3f808
--- /dev/null
+++ b/scripts/lib/wic/ksparser.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python -tt
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2016 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# This module provides parser for kickstart format
+#
+# AUTHORS
+# Ed Bartosh <ed.bartosh> (at] linux.intel.com>
+
+"""Kickstart parser module."""
+
+import os
+import shlex
+from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
+
+from wic import msger
+from wic.partition import Partition
+from wic.utils.misc import find_canned
+
+class KickStartError(Exception):
+ """Custom exception."""
+ pass
+
+class KickStartParser(ArgumentParser):
+ """
+ This class overwrites error method to throw exception
+ instead of producing usage message(default argparse behavior).
+ """
+ def error(self, message):
+ raise ArgumentError(None, message)
+
+def sizetype(arg):
+ """
+ Custom type for ArgumentParser
+ Converts size string in <num>[K|k|M|G] format into the integer value
+ """
+ if arg.isdigit():
+ return int(arg) * 1024L
+
+ if not arg[:-1].isdigit():
+ raise ArgumentTypeError("Invalid size: %r" % arg)
+
+ size = int(arg[:-1])
+ if arg.endswith("k") or arg.endswith("K"):
+ return size
+ if arg.endswith("M"):
+ return size * 1024L
+ if arg.endswith("G"):
+ return size * 1024L * 1024L
+
+ raise ArgumentTypeError("Invalid size: %r" % arg)
+
+def overheadtype(arg):
+ """
+ Custom type for ArgumentParser
+    Converts overhead string to float and checks that it is at least 1.0
+ """
+ try:
+ result = float(arg)
+ except ValueError:
+ raise ArgumentTypeError("Invalid value: %r" % arg)
+
+ if result < 1.0:
+        raise ArgumentTypeError("Overhead factor should be >= 1.0, got %r" % arg)
+
+ return result
+
+def cannedpathtype(arg):
+ """
+ Custom type for ArgumentParser
+ Tries to find file in the list of canned wks paths
+ """
+    scripts_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
+ result = find_canned(scripts_path, arg)
+ if not result:
+ raise ArgumentTypeError("file not found: %s" % arg)
+ return result
+
+class KickStart(object):
+ """"Kickstart parser implementation."""
+
+ def __init__(self, confpath):
+
+ self.partitions = []
+ self.bootloader = None
+ self.lineno = 0
+ self.partnum = 0
+
+ parser = KickStartParser()
+ subparsers = parser.add_subparsers()
+
+ part = subparsers.add_parser('part')
+ part.add_argument('mountpoint')
+ part.add_argument('--active', action='store_true')
+ part.add_argument('--align', type=int)
+ part.add_argument("--extra-space", type=sizetype, default=10*1024L)
+ part.add_argument('--fsoptions', dest='fsopts')
+ part.add_argument('--fstype')
+ part.add_argument('--label')
+ part.add_argument('--no-table', action='store_true')
+ part.add_argument('--ondisk', '--ondrive', dest='disk')
+ part.add_argument("--overhead-factor", type=overheadtype, default=1.3)
+ part.add_argument('--part-type')
+ part.add_argument('--rootfs-dir')
+ part.add_argument('--size', type=sizetype, default=0)
+ part.add_argument('--source')
+ part.add_argument('--sourceparams')
+ part.add_argument('--use-uuid', action='store_true')
+ part.add_argument('--uuid')
+
+ bootloader = subparsers.add_parser('bootloader')
+ bootloader.add_argument('--append')
+ bootloader.add_argument('--configfile')
+ bootloader.add_argument('--ptable', choices=('msdos', 'gpt'),
+ default='msdos')
+ bootloader.add_argument('--timeout', type=int)
+ bootloader.add_argument('--source')
+
+ include = subparsers.add_parser('include')
+ include.add_argument('path', type=cannedpathtype)
+
+ self._parse(parser, confpath)
+ if not self.bootloader:
+ msger.warning('bootloader config not specified, using defaults')
+ self.bootloader = bootloader.parse_args([])
+
+ def _parse(self, parser, confpath):
+ """
+ Parse file in .wks format using provided parser.
+ """
+ with open(confpath) as conf:
+ lineno = 0
+ for line in conf:
+ line = line.strip()
+ lineno += 1
+ if line and line[0] != '#':
+ try:
+ parsed = parser.parse_args(shlex.split(line))
+ except ArgumentError as err:
+ raise KickStartError('%s:%d: %s' % \
+ (confpath, lineno, err))
+ if line.startswith('part'):
+ self.partnum += 1
+ self.partitions.append(Partition(parsed, self.partnum))
+ elif line.startswith('include'):
+ self._parse(parser, parsed.path)
+ elif line.startswith('bootloader'):
+ if not self.bootloader:
+ self.bootloader = parsed
+ else:
+ err = "%s:%d: more than one bootloader specified" \
+ % (confpath, lineno)
+ raise KickStartError(err)
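+
+# For reference, a minimal .wks file using only the options defined above
+# might look like this (an illustrative sketch, not a canned file):
+#
+#   part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+#   part / --ondisk sda --fstype=ext4 --label platform --align 1024 --size 300M
+#   bootloader --ptable msdos --timeout=0 --append="rootwait rootfstype=ext4"
+#
+# An 'include <path>' line pulls in another .wks file found via the canned
+# paths searched by cannedpathtype() above.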
diff --git a/scripts/lib/wic/msger.py b/scripts/lib/wic/msger.py
new file mode 100644
index 0000000..b737554
--- /dev/null
+++ b/scripts/lib/wic/msger.py
@@ -0,0 +1,309 @@
+#!/usr/bin/env python -tt
+# vim: ai ts=4 sts=4 et sw=4
+#
+# Copyright (c) 2009, 2010, 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import sys
+import re
+import time
+
+__ALL__ = ['set_mode',
+ 'get_loglevel',
+ 'set_loglevel',
+ 'set_logfile',
+ 'raw',
+ 'debug',
+ 'verbose',
+ 'info',
+ 'warning',
+ 'error',
+ 'ask',
+ 'pause',
+ ]
+
+# COLORs in ANSI
+INFO_COLOR = 32 # green
+WARN_COLOR = 33 # yellow
+ERR_COLOR = 31 # red
+ASK_COLOR = 34 # blue
+NO_COLOR = 0
+
+PREFIX_RE = re.compile(r'^<(.*?)>\s*(.*)', re.S)
+
+INTERACTIVE = True
+
+LOG_LEVEL = 1
+LOG_LEVELS = {
+ 'quiet': 0,
+ 'normal': 1,
+ 'verbose': 2,
+ 'debug': 3,
+ 'never': 4,
+}
+
+LOG_FILE_FP = None
+LOG_CONTENT = ''
+CATCHERR_BUFFILE_FD = -1
+CATCHERR_BUFFILE_PATH = None
+CATCHERR_SAVED_2 = -1
+
+def _general_print(head, color, msg=None, stream=None, level='normal'):
+ global LOG_CONTENT
+ if not stream:
+ stream = sys.stdout
+
+ if LOG_LEVELS[level] > LOG_LEVEL:
+ # skip
+ return
+
+ # encode raw 'unicode' str to utf8 encoded str
+ if msg and isinstance(msg, unicode):
+ msg = msg.encode('utf-8', 'ignore')
+
+ errormsg = ''
+ if CATCHERR_BUFFILE_FD > 0:
+ size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
+ os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
+ errormsg = os.read(CATCHERR_BUFFILE_FD, size)
+ os.ftruncate(CATCHERR_BUFFILE_FD, 0)
+
+ # append error msg to LOG
+ if errormsg:
+ LOG_CONTENT += errormsg
+
+ # append normal msg to LOG
+ save_msg = msg.strip() if msg else None
+ if save_msg:
+ timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
+ LOG_CONTENT += timestr + save_msg + '\n'
+
+ if errormsg:
+ _color_print('', NO_COLOR, errormsg, stream, level)
+
+ _color_print(head, color, msg, stream, level)
+
+def _color_print(head, color, msg, stream, level):
+ colored = True
+ if color == NO_COLOR or \
+ not stream.isatty() or \
+ os.getenv('ANSI_COLORS_DISABLED') is not None:
+ colored = False
+
+ if head.startswith('\r'):
+ # need not \n at last
+ newline = False
+ else:
+ newline = True
+
+ if colored:
+ head = '\033[%dm%s:\033[0m ' %(color, head)
+ if not newline:
+ # ESC cmd to clear line
+ head = '\033[2K' + head
+ else:
+ if head:
+ head += ': '
+ if head.startswith('\r'):
+ head = head.lstrip()
+ newline = True
+
+ if msg is not None:
+ if isinstance(msg, unicode):
+ msg = msg.encode('utf8', 'ignore')
+
+ stream.write('%s%s' % (head, msg))
+ if newline:
+ stream.write('\n')
+
+ stream.flush()
+
+def _color_perror(head, color, msg, level='normal'):
+ if CATCHERR_BUFFILE_FD > 0:
+ _general_print(head, color, msg, sys.stdout, level)
+ else:
+ _general_print(head, color, msg, sys.stderr, level)
+
+def _split_msg(head, msg):
+ if isinstance(msg, list):
+ msg = '\n'.join(map(str, msg))
+
+ if msg.startswith('\n'):
+ # means print \n at first
+ msg = msg.lstrip()
+ head = '\n' + head
+
+ elif msg.startswith('\r'):
+ # means print \r at first
+ msg = msg.lstrip()
+ head = '\r' + head
+
+ match = PREFIX_RE.match(msg)
+ if match:
+ head += ' <%s>' % match.group(1)
+ msg = match.group(2)
+
+ return head, msg
+
+def get_loglevel():
+ return (k for k, v in LOG_LEVELS.items() if v == LOG_LEVEL).next()
+
+def set_loglevel(level):
+ global LOG_LEVEL
+ if level not in LOG_LEVELS:
+ # no effect
+ return
+
+ LOG_LEVEL = LOG_LEVELS[level]
+
+def set_interactive(mode=True):
+ global INTERACTIVE
+ if mode:
+ INTERACTIVE = True
+ else:
+ INTERACTIVE = False
+
+def log(msg=''):
+ # log msg to LOG_CONTENT then save to logfile
+ global LOG_CONTENT
+ if msg:
+ LOG_CONTENT += msg
+
+def raw(msg=''):
+ _general_print('', NO_COLOR, msg)
+
+def info(msg):
+ head, msg = _split_msg('Info', msg)
+ _general_print(head, INFO_COLOR, msg)
+
+def verbose(msg):
+ head, msg = _split_msg('Verbose', msg)
+ _general_print(head, INFO_COLOR, msg, level='verbose')
+
+def warning(msg):
+ head, msg = _split_msg('Warning', msg)
+ _color_perror(head, WARN_COLOR, msg)
+
+def debug(msg):
+ head, msg = _split_msg('Debug', msg)
+ _color_perror(head, ERR_COLOR, msg, level='debug')
+
+def error(msg):
+ head, msg = _split_msg('Error', msg)
+ _color_perror(head, ERR_COLOR, msg)
+ sys.exit(1)
+
+def ask(msg, default=True):
+ _general_print('\rQ', ASK_COLOR, '')
+ try:
+ if default:
+ msg += '(Y/n) '
+ else:
+ msg += '(y/N) '
+ if INTERACTIVE:
+ while True:
+ repl = raw_input(msg)
+ if repl.lower() == 'y':
+ return True
+ elif repl.lower() == 'n':
+ return False
+ elif not repl.strip():
+ # <Enter>
+ return default
+
+ # else loop
+ else:
+ if default:
+ msg += ' Y'
+ else:
+ msg += ' N'
+ _general_print('', NO_COLOR, msg)
+
+ return default
+ except KeyboardInterrupt:
+ sys.stdout.write('\n')
+ sys.exit(2)
+
+def choice(msg, choices, default=0):
+ if default >= len(choices):
+ return None
+ _general_print('\rQ', ASK_COLOR, '')
+ try:
+ msg += " [%s] " % '/'.join(choices)
+ if INTERACTIVE:
+ while True:
+ repl = raw_input(msg)
+ if repl in choices:
+ return repl
+ elif not repl.strip():
+ return choices[default]
+ else:
+ msg += choices[default]
+ _general_print('', NO_COLOR, msg)
+
+ return choices[default]
+ except KeyboardInterrupt:
+ sys.stdout.write('\n')
+ sys.exit(2)
+
+def pause(msg=None):
+ if INTERACTIVE:
+ _general_print('\rQ', ASK_COLOR, '')
+ if msg is None:
+ msg = 'press <ENTER> to continue ...'
+ raw_input(msg)
+
+def set_logfile(fpath):
+ global LOG_FILE_FP
+
+ def _savelogf():
+ if LOG_FILE_FP:
+ with open(LOG_FILE_FP, 'w') as log:
+ log.write(LOG_CONTENT)
+
+ if LOG_FILE_FP is not None:
+ warning('duplicate log file configuration')
+
+ LOG_FILE_FP = fpath
+
+ import atexit
+ atexit.register(_savelogf)
+
+def enable_logstderr(fpath):
+ global CATCHERR_BUFFILE_FD
+ global CATCHERR_BUFFILE_PATH
+ global CATCHERR_SAVED_2
+
+ if os.path.exists(fpath):
+ os.remove(fpath)
+ CATCHERR_BUFFILE_PATH = fpath
+ CATCHERR_BUFFILE_FD = os.open(CATCHERR_BUFFILE_PATH, os.O_RDWR|os.O_CREAT)
+ CATCHERR_SAVED_2 = os.dup(2)
+ os.dup2(CATCHERR_BUFFILE_FD, 2)
+
+def disable_logstderr():
+ global CATCHERR_BUFFILE_FD
+ global CATCHERR_BUFFILE_PATH
+ global CATCHERR_SAVED_2
+
+ raw(msg=None) # flush message buffer and print it.
+ os.dup2(CATCHERR_SAVED_2, 2)
+ os.close(CATCHERR_SAVED_2)
+ os.close(CATCHERR_BUFFILE_FD)
+ os.unlink(CATCHERR_BUFFILE_PATH)
+ CATCHERR_BUFFILE_FD = -1
+ CATCHERR_BUFFILE_PATH = None
+ CATCHERR_SAVED_2 = -1
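+
+# Typical usage from other wic modules (an illustrative sketch):
+#
+#   from wic import msger
+#   msger.set_loglevel('verbose')
+#   msger.info('creating image')    # green "Info:" prefix, honours the loglevel
+#   msger.warning('file not found') # yellow "Warning:" prefix, sent to stderr
+#   msger.error('fatal problem')    # red "Error:" prefix, then sys.exit(1)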
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
new file mode 100644
index 0000000..f40d1bc
--- /dev/null
+++ b/scripts/lib/wic/partition.py
@@ -0,0 +1,414 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013-2016 Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module provides the OpenEmbedded partition object definitions.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+# Ed Bartosh <ed.bartosh (at] linux.intel.com>
+
+import os
+import tempfile
+import uuid
+
+from wic.utils.oe.misc import msger, parse_sourceparams
+from wic.utils.oe.misc import exec_cmd, exec_native_cmd
+from wic.plugin import pluginmgr
+
+partition_methods = {
+ "do_stage_partition":None,
+ "do_prepare_partition":None,
+ "do_configure_partition":None,
+}
+
+class Partition(object):
+
+ def __init__(self, args, lineno):
+ self.args = args
+ self.active = args.active
+ self.align = args.align
+ self.disk = args.disk
+ self.extra_space = args.extra_space
+ self.fsopts = args.fsopts
+ self.fstype = args.fstype
+ self.label = args.label
+ self.mountpoint = args.mountpoint
+ self.no_table = args.no_table
+ self.overhead_factor = args.overhead_factor
+ self.part_type = args.part_type
+ self.rootfs_dir = args.rootfs_dir
+ self.size = args.size
+ self.source = args.source
+ self.sourceparams = args.sourceparams
+ self.use_uuid = args.use_uuid
+ self.uuid = args.uuid
+ if args.use_uuid and not self.uuid:
+ self.uuid = str(uuid.uuid4())
+
+ self.lineno = lineno
+ self.source_file = ""
+ self.sourceparams_dict = {}
+
+ def get_extra_block_count(self, current_blocks):
+ """
+ The --size param is reflected in self.size (in kB). Given that we
+ already have current_blocks (1k) blocks, calculate and return the
+ number of (1k) blocks we need to add to get to --size, or 0 if
+ we're already there or beyond.
+ """
+ msger.debug("Requested partition size for %s: %d" % \
+ (self.mountpoint, self.size))
+
+ if not self.size:
+ return 0
+
+ requested_blocks = self.size
+
+ msger.debug("Requested blocks %d, current_blocks %d" % \
+ (requested_blocks, current_blocks))
+
+ if requested_blocks > current_blocks:
+ return requested_blocks - current_blocks
+ else:
+ return 0
+
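+ # A worked example of the sizing used by the prepare_rootfs_* helpers
+ # below (illustrative numbers): for a 100000 kB rootfs with no --size,
+ # the default --extra-space 10240 and --overhead-factor 1.3,
+ # get_extra_block_count() returns 0, extra_blocks becomes 10240 and the
+ # image is created with (100000 + 10240) * 1.3 ~= 143312 1k blocks.
+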
+ def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Prepare content for individual partitions, depending on
+ partition command parameters.
+ """
+ if self.sourceparams:
+ self.sourceparams_dict = parse_sourceparams(self.sourceparams)
+
+ if not self.source:
+ if not self.size:
+ msger.error("The %s partition has a size of zero. Please "
+ "specify a non-zero --size for that partition." % \
+ self.mountpoint)
+ if self.fstype and self.fstype == "swap":
+ self.prepare_swap_partition(cr_workdir, oe_builddir,
+ native_sysroot)
+ self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
+ elif self.fstype:
+ rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
+ self.lineno, self.fstype)
+ if os.path.isfile(rootfs):
+ os.remove(rootfs)
+ for prefix in ("ext", "btrfs", "vfat", "squashfs"):
+ if self.fstype.startswith(prefix):
+ method = getattr(self,
+ "prepare_empty_partition_" + prefix)
+ method(rootfs, oe_builddir, native_sysroot)
+ self.source_file = rootfs
+ break
+ return
+
+ plugins = pluginmgr.get_source_plugins()
+
+ if self.source not in plugins:
+ msger.error("The '%s' --source specified for %s doesn't exist.\n\t"
+ "See 'wic list source-plugins' for a list of available"
+ " --sources.\n\tSee 'wic help source-plugins' for "
+ "details on adding a new source plugin." % \
+ (self.source, self.mountpoint))
+
+ self._source_methods = pluginmgr.get_source_plugin_methods(\
+ self.source, partition_methods)
+ self._source_methods["do_configure_partition"](self, self.sourceparams_dict,
+ creator, cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+ self._source_methods["do_stage_partition"](self, self.sourceparams_dict,
+ creator, cr_workdir,
+ oe_builddir,
+ bootimg_dir, kernel_dir,
+ native_sysroot)
+ self._source_methods["do_prepare_partition"](self, self.sourceparams_dict,
+ creator, cr_workdir,
+ oe_builddir,
+ bootimg_dir, kernel_dir, rootfs_dir,
+ native_sysroot)
+
+ def prepare_rootfs_from_fs_image(self, cr_workdir, oe_builddir,
+ rootfs_dir):
+ """
+ Handle an already-created partition e.g. xxx.ext3
+ """
+ # despite the parameter name, callers pass the path of the existing
+ # filesystem image in 'oe_builddir' (see the fsimage source plugin)
+ rootfs = oe_builddir
+ du_cmd = "du -Lbks %s" % rootfs
+ out = exec_cmd(du_cmd)
+ rootfs_size = out.split()[0]
+
+ self.size = rootfs_size
+ self.source_file = rootfs
+
+ def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
+ native_sysroot):
+ """
+ Prepare content for a rootfs partition i.e. create a partition
+ and fill it from a /rootfs dir.
+
+ Currently handles ext2/3/4, btrfs and vfat.
+ """
+ p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
+ p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR",
+ "%s/../pseudo" % rootfs_dir)
+ p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir)
+ p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1")
+ pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
+ pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
+ pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
+ pseudo += "%s/usr/bin/pseudo " % native_sysroot
+
+ rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
+ self.lineno, self.fstype)
+ if os.path.isfile(rootfs):
+ os.remove(rootfs)
+
+ for prefix in ("ext", "btrfs", "vfat", "squashfs"):
+ if self.fstype.startswith(prefix):
+ method = getattr(self, "prepare_rootfs_" + prefix)
+ method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+
+ self.source_file = rootfs
+
+ # get the rootfs size in the right units for kickstart (kB)
+ du_cmd = "du -Lbks %s" % rootfs
+ out = exec_cmd(du_cmd)
+ self.size = out.split()[0]
+
+ break
+
+ def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for an ext2/3/4 rootfs partition.
+ """
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ actual_rootfs_size = int(out.split()[0])
+
+ extra_blocks = self.get_extra_block_count(actual_rootfs_size)
+ if extra_blocks < self.extra_space:
+ extra_blocks = self.extra_space
+
+ rootfs_size = actual_rootfs_size + extra_blocks
+ rootfs_size *= self.overhead_factor
+
+ msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
+ (extra_blocks, self.mountpoint, rootfs_size))
+
+ dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0 bs=1k" % \
+ (rootfs, rootfs_size)
+ exec_cmd(dd_cmd)
+
+ extra_imagecmd = "-i 8192"
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s -F %s %s %s -d %s" % \
+ (self.fstype, extra_imagecmd, rootfs, label_str, rootfs_dir)
+ exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a btrfs rootfs partition.
+ """
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ actual_rootfs_size = int(out.split()[0])
+
+ extra_blocks = self.get_extra_block_count(actual_rootfs_size)
+ if extra_blocks < self.extra_space:
+ extra_blocks = self.extra_space
+
+ rootfs_size = actual_rootfs_size + extra_blocks
+ rootfs_size *= self.overhead_factor
+
+ msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
+ (extra_blocks, self.mountpoint, rootfs_size))
+
+ dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0 bs=1k" % \
+ (rootfs, rootfs_size)
+ exec_cmd(dd_cmd)
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s -b %d -r %s %s %s" % \
+ (self.fstype, rootfs_size * 1024, rootfs_dir, label_str, rootfs)
+ exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_rootfs_vfat(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a vfat rootfs partition.
+ """
+ du_cmd = "du -bks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+
+ extra_blocks = self.get_extra_block_count(blocks)
+ if extra_blocks < self.extra_space:
+ extra_blocks = self.extra_space
+
+ blocks += extra_blocks
+
+ msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
+ (extra_blocks, self.mountpoint, blocks))
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation
+ # is done in blocks, thus the mod by 16 instead of 32. Apply
+ # sector count fix only when needed.
+ if blocks % 16 != 0:
+ blocks += (16 - (blocks % 16))
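+ # (e.g. 1000 1k blocks -> 1000 % 16 == 8, so 8 blocks are added, giving
+ # 1008 blocks == 2016 sectors, an exact 63 tracks of 32 sectors)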
+
+ label_str = "-n boot"
+ if self.label:
+ label_str = "-n %s" % self.label
+
+ dosfs_cmd = "mkdosfs %s -S 512 -C %s %d" % (label_str, rootfs, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % rootfs
+ exec_cmd(chmod_cmd)
+
+ def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a squashfs rootfs partition.
+ """
+ squashfs_cmd = "mksquashfs %s %s -noappend" % \
+ (rootfs_dir, rootfs)
+ exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_empty_partition_ext(self, rootfs, oe_builddir,
+ native_sysroot):
+ """
+ Prepare an empty ext2/3/4 partition.
+ """
+ dd_cmd = "dd if=/dev/zero of=%s bs=1k seek=%d count=0" % \
+ (rootfs, self.size)
+ exec_cmd(dd_cmd)
+
+ extra_imagecmd = "-i 8192"
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s -F %s %s %s" % \
+ (self.fstype, extra_imagecmd, label_str, rootfs)
+ exec_native_cmd(mkfs_cmd, native_sysroot)
+
+ def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
+ native_sysroot):
+ """
+ Prepare an empty btrfs partition.
+ """
+ dd_cmd = "dd if=/dev/zero of=%s bs=1k seek=%d count=0" % \
+ (rootfs, self.size)
+ exec_cmd(dd_cmd)
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s -b %d %s %s" % \
+ (self.fstype, self.size * 1024, label_str, rootfs)
+ exec_native_cmd(mkfs_cmd, native_sysroot)
+
+ def prepare_empty_partition_vfat(self, rootfs, oe_builddir,
+ native_sysroot):
+ """
+ Prepare an empty vfat partition.
+ """
+ blocks = self.size
+
+ label_str = "-n boot"
+ if self.label:
+ label_str = "-n %s" % self.label
+
+ dosfs_cmd = "mkdosfs %s -S 512 -C %s %d" % (label_str, rootfs, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % rootfs
+ exec_cmd(chmod_cmd)
+
+ def prepare_empty_partition_squashfs(self, cr_workdir, oe_builddir,
+ native_sysroot):
+ """
+ Prepare an empty squashfs partition.
+ """
+ msger.warning("Creating of an empty squashfs %s partition was attempted. " \
+ "Proceeding as requested." % self.mountpoint)
+
+ path = "%s/fs_%s.%s" % (cr_workdir, self.label, self.fstype)
+ if os.path.isfile(path):
+ os.remove(path)
+
+ # it is not possible to create a squashfs without source data,
+ # thus prepare an empty temp dir that is used as source
+ tmpdir = tempfile.mkdtemp()
+
+ squashfs_cmd = "mksquashfs %s %s -noappend" % \
+ (tmpdir, path)
+ exec_native_cmd(squashfs_cmd, native_sysroot)
+
+ os.rmdir(tmpdir)
+
+ # get the rootfs size in the right units for kickstart (kB)
+ du_cmd = "du -Lbks %s" % path
+ out = exec_cmd(du_cmd)
+ fs_size = out.split()[0]
+
+ self.size = fs_size
+
+ def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
+ """
+ Prepare a swap partition.
+ """
+ path = "%s/fs.%s" % (cr_workdir, self.fstype)
+
+ dd_cmd = "dd if=/dev/zero of=%s bs=1k seek=%d count=0" % \
+ (path, self.size)
+ exec_cmd(dd_cmd)
+
+ import uuid
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+ mkswap_cmd = "mkswap %s -U %s %s" % (label_str, str(uuid.uuid1()), path)
+ exec_native_cmd(mkswap_cmd, native_sysroot)
+
diff --git a/scripts/lib/wic/plugin.py b/scripts/lib/wic/plugin.py
new file mode 100644
index 0000000..151ff31
--- /dev/null
+++ b/scripts/lib/wic/plugin.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os, sys
+
+from wic import msger
+from wic import pluginbase
+from wic.utils import errors
+from wic.utils.oe.misc import get_bitbake_var
+
+__ALL__ = ['PluginMgr', 'pluginmgr']
+
+PLUGIN_TYPES = ["imager", "source"]
+
+PLUGIN_DIR = "/lib/wic/plugins" # relative to scripts
+SCRIPTS_PLUGIN_DIR = "scripts" + PLUGIN_DIR
+
+class PluginMgr(object):
+ plugin_dirs = {}
+
+ # make the manager class a singleton
+ _instance = None
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ cls._instance = super(PluginMgr, cls).__new__(cls, *args, **kwargs)
+
+ return cls._instance
+
+ def __init__(self):
+ wic_path = os.path.dirname(__file__)
+ eos = wic_path.rfind('scripts') + len('scripts')
+ scripts_path = wic_path[:eos]
+ self.scripts_path = scripts_path
+ self.plugin_dir = scripts_path + PLUGIN_DIR
+ self.layers_path = None
+
+ def _build_plugin_dir_list(self, plugin_dir, ptype):
+ if self.layers_path is None:
+ self.layers_path = get_bitbake_var("BBLAYERS")
+ layer_dirs = []
+
+ if self.layers_path is not None:
+ for layer_path in self.layers_path.split():
+ path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR, ptype)
+ layer_dirs.append(path)
+
+ path = os.path.join(plugin_dir, ptype)
+ layer_dirs.append(path)
+
+ return layer_dirs
+
+ def append_dirs(self, dirs):
+ for path in dirs:
+ self._add_plugindir(path)
+
+ # load all the plugins AGAIN
+ self._load_all()
+
+ def _add_plugindir(self, path):
+ path = os.path.abspath(os.path.expanduser(path))
+
+ if not os.path.isdir(path):
+ return
+
+ if path not in self.plugin_dirs:
+ self.plugin_dirs[path] = False
+ # the value True/False means "loaded"
+
+ def _load_all(self):
+ for (pdir, loaded) in self.plugin_dirs.iteritems():
+ if loaded:
+ continue
+
+ sys.path.insert(0, pdir)
+ for mod in [x[:-3] for x in os.listdir(pdir) if x.endswith(".py")]:
+ if mod and mod != '__init__':
+ if mod in sys.modules:
+ #self.plugin_dirs[pdir] = True
+ msger.warning("Module %s already exists, skip" % mod)
+ else:
+ try:
+ pymod = __import__(mod)
+ self.plugin_dirs[pdir] = True
+ msger.debug("Plugin module %s:%s imported"\
+ % (mod, pymod.__file__))
+ except ImportError, err:
+ msg = 'Failed to load plugin %s/%s: %s' \
+ % (os.path.basename(pdir), mod, err)
+ msger.warning(msg)
+
+ del sys.path[0]
+
+ def get_plugins(self, ptype):
+ """ the return value is dict of name:class pairs """
+
+ if ptype not in PLUGIN_TYPES:
+ raise errors.CreatorError('%s is not valid plugin type' % ptype)
+
+ plugins_dir = self._build_plugin_dir_list(self.plugin_dir, ptype)
+
+ self.append_dirs(plugins_dir)
+
+ return pluginbase.get_plugins(ptype)
+
+ def get_source_plugins(self):
+ """
+ Return list of available source plugins.
+ """
+ plugins_dir = self._build_plugin_dir_list(self.plugin_dir, 'source')
+
+ self.append_dirs(plugins_dir)
+
+ return self.get_plugins('source')
+
+
+ def get_source_plugin_methods(self, source_name, methods):
+ """
+ The methods param is a dict with the method names to find. On
+ return, the dict values will be filled in with pointers to the
+ corresponding methods. If one or more methods are not found,
+ None is returned.
+ """
+ return_methods = None
+ for _source_name, klass in self.get_plugins('source').iteritems():
+ if _source_name == source_name:
+ for _method_name in methods.keys():
+ if not hasattr(klass, _method_name):
+ msger.warning("Unimplemented %s source interface for: %s"\
+ % (_method_name, _source_name))
+ return None
+ func = getattr(klass, _method_name)
+ methods[_method_name] = func
+ return_methods = methods
+ return return_methods
+
+pluginmgr = PluginMgr()
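+
+# Example use of the shared manager instance (illustrative):
+#
+#   from wic.plugin import pluginmgr
+#   plugins = pluginmgr.get_source_plugins()   # dict of name:class pairs
+#   methods = pluginmgr.get_source_plugin_methods('bootimg-pcbios',
+#                                                 {'do_prepare_partition': None})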
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
new file mode 100644
index 0000000..ee8fe95
--- /dev/null
+++ b/scripts/lib/wic/pluginbase.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+from wic import msger
+
+class _Plugin(object):
+ class __metaclass__(type):
+ def __init__(cls, name, bases, attrs):
+ if not hasattr(cls, 'plugins'):
+ cls.plugins = {}
+
+ elif 'wic_plugin_type' in attrs:
+ if attrs['wic_plugin_type'] not in cls.plugins:
+ cls.plugins[attrs['wic_plugin_type']] = {}
+
+ elif hasattr(cls, 'wic_plugin_type') and 'name' in attrs:
+ cls.plugins[cls.wic_plugin_type][attrs['name']] = cls
+
+ def show_plugins(cls):
+ for cls in cls.plugins[cls.wic_plugin_type]:
+ print cls
+
+ def get_plugins(cls):
+ return cls.plugins
+
+
+class ImagerPlugin(_Plugin):
+ wic_plugin_type = "imager"
+
+
+class SourcePlugin(_Plugin):
+ wic_plugin_type = "source"
+ """
+ The methods that can be implemented by --source plugins.
+
+ Any methods not implemented in a subclass inherit these.
+ """
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. This provides a hook to allow finalization of a
+ disk image e.g. to write an MBR to it.
+ """
+ msger.debug("SourcePlugin: do_install_disk: disk: %s" % disk_name)
+
+ @classmethod
+ def do_stage_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Special content staging hook called before do_prepare_partition(),
+ normally empty.
+
+ Typically, a partition will just use the passed-in parameters,
+ e.g. the straight bootimg_dir, but in some cases things need to
+ be more tailored, e.g. to use a deploy dir + /boot. This
+ hook allows those files to be staged in a customized fashion.
+ Note that get_bitbake_var() allows you to access non-standard
+ variables that you might want to use for this.
+ """
+ msger.debug("SourcePlugin: do_stage_partition: part: %s" % part)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), typically used to create
+ custom configuration files for a partition, for example
+ syslinux or grub config files.
+ """
+ msger.debug("SourcePlugin: do_configure_partition: part: %s" % part)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
+ native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ msger.debug("SourcePlugin: do_prepare_partition: part: %s" % part)
+
+def get_plugins(typen):
+ plugins = ImagerPlugin.get_plugins()
+ if typen in plugins:
+ return plugins[typen]
+ else:
+ return None
+
+__all__ = ['ImagerPlugin', 'SourcePlugin', 'get_plugins']
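+
+# New plugins register themselves simply by being defined; a minimal source
+# plugin sketch (illustrative, the class and plugin names are hypothetical):
+#
+#   class MySourcePlugin(SourcePlugin):
+#       name = 'my-source'
+#
+#       @classmethod
+#       def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+#                                oe_builddir, bootimg_dir, kernel_dir,
+#                                rootfs_dir, native_sysroot):
+#           part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir,
+#                               native_sysroot)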
diff --git a/scripts/lib/wic/plugins/imager/direct_plugin.py b/scripts/lib/wic/plugins/imager/direct_plugin.py
new file mode 100644
index 0000000..6d3f46c
--- /dev/null
+++ b/scripts/lib/wic/plugins/imager/direct_plugin.py
@@ -0,0 +1,102 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'direct' imager plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+from wic.utils import errors
+from wic.conf import configmgr
+
+import wic.imager.direct as direct
+from wic.pluginbase import ImagerPlugin
+
+class DirectPlugin(ImagerPlugin):
+ """
+ Install a system into a file containing a partitioned disk image.
+
+ An image file is formatted with a partition table, each partition
+ created from a rootfs or other OpenEmbedded build artifact and dd'ed
+ into the virtual disk. The disk image can subsequently be dd'ed onto
+ media and used on actual hardware.
+ """
+
+ name = 'direct'
+
+ @classmethod
+ def __rootfs_dir_to_dict(cls, rootfs_dirs):
+ """
+ Takes a string of space-separated 'connection=dir' pairs and
+ returns them as a dict
+ """
+ krootfs_dir = {}
+ for rootfs_dir in rootfs_dirs.split(' '):
+ key, val = rootfs_dir.split('=')
+ krootfs_dir[key] = val
+
+ return krootfs_dir
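+
+ # e.g. (illustrative): "ROOTFS_DIR=/path/to/rootfs" becomes
+ # {'ROOTFS_DIR': '/path/to/rootfs'}; each space-separated
+ # 'connection=dir' pair becomes one dict entry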
+
+ @classmethod
+ def do_create(cls, opts, *args):
+ """
+ Create direct image, called from creator as 'direct' cmd
+ """
+ if len(args) != 8:
+ raise errors.Usage("Extra arguments given")
+
+ native_sysroot = args[0]
+ kernel_dir = args[1]
+ bootimg_dir = args[2]
+ rootfs_dir = args[3]
+
+ creatoropts = configmgr.create
+ ksconf = args[4]
+
+ image_output_dir = args[5]
+ oe_builddir = args[6]
+ compressor = args[7]
+
+ krootfs_dir = cls.__rootfs_dir_to_dict(rootfs_dir)
+
+ configmgr._ksconf = ksconf
+
+ creator = direct.DirectImageCreator(oe_builddir,
+ image_output_dir,
+ krootfs_dir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot,
+ compressor,
+ creatoropts)
+
+ try:
+ creator.create()
+ creator.assemble()
+ creator.finalize()
+ creator.print_outimage_info()
+
+ except errors.CreatorError:
+ raise
+ finally:
+ creator.cleanup()
+
+ return 0
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
new file mode 100644
index 0000000..a4734c9
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -0,0 +1,237 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-efi' source plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import os
+import shutil
+
+from wic import msger
+from wic.pluginbase import SourcePlugin
+from wic.utils.misc import get_custom_config
+from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var, \
+ BOOTDD_EXTRA_SPACE
+
+class BootimgEFIPlugin(SourcePlugin):
+ """
+ Create EFI boot partition.
+ This plugin supports GRUB 2 and gummiboot bootloaders.
+ """
+
+ name = 'bootimg-efi'
+
+ @classmethod
+ def do_configure_grubefi(cls, hdddir, creator, cr_workdir):
+ """
+ Create loader-specific (grub-efi) config
+ """
+ configfile = creator.ks.bootloader.configfile
+ custom_cfg = None
+ if configfile:
+ custom_cfg = get_custom_config(configfile)
+ if custom_cfg:
+ # Use a custom configuration for grub
+ grubefi_conf = custom_cfg
+ msger.debug("Using custom configuration file "
+ "%s for grub.cfg" % configfile)
+ else:
+ msger.error("configfile is specified but failed to "
+ "get it from %s." % configfile)
+
+ if not custom_cfg:
+ # Create grub configuration using parameters from wks file
+ bootloader = creator.ks.bootloader
+
+ grubefi_conf = ""
+ grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
+ grubefi_conf += "default=boot\n"
+ grubefi_conf += "timeout=%s\n" % bootloader.timeout
+ grubefi_conf += "menuentry 'boot'{\n"
+
+ kernel = "/bzImage"
+
+ grubefi_conf += "linux %s root=%s rootwait %s\n" \
+ % (kernel, creator.rootdev, bootloader.append)
+ grubefi_conf += "}\n"
+
+ msger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg" \
+ % cr_workdir)
+ cfg = open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w")
+ cfg.write(grubefi_conf)
+ cfg.close()
+
+ @classmethod
+ def do_configure_gummiboot(cls, hdddir, creator, cr_workdir):
+ """
+ Create loader-specific (gummiboot) config
+ """
+ install_cmd = "install -d %s/loader" % hdddir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -d %s/loader/entries" % hdddir
+ exec_cmd(install_cmd)
+
+ bootloader = creator.ks.bootloader
+
+ loader_conf = ""
+ loader_conf += "default boot\n"
+ loader_conf += "timeout %d\n" % bootloader.timeout
+
+ msger.debug("Writing gummiboot config %s/hdd/boot/loader/loader.conf" \
+ % cr_workdir)
+ cfg = open("%s/hdd/boot/loader/loader.conf" % cr_workdir, "w")
+ cfg.write(loader_conf)
+ cfg.close()
+
+ configfile = creator.ks.bootloader.configfile
+ custom_cfg = None
+ if configfile:
+ custom_cfg = get_custom_config(configfile)
+ if custom_cfg:
+ # Use a custom configuration for gummiboot
+ boot_conf = custom_cfg
+ msger.debug("Using custom configuration file "
+ "%s for gummiboots's boot.conf" % configfile)
+ else:
+ msger.error("configfile is specified but failed to "
+ "get it from %s." % configfile)
+
+ if not custom_cfg:
+ # Create gummiboot configuration using parameters from wks file
+ kernel = "/bzImage"
+
+ boot_conf = ""
+ boot_conf += "title boot\n"
+ boot_conf += "linux %s\n" % kernel
+ boot_conf += "options LABEL=Boot root=%s %s\n" % \
+ (creator.rootdev, bootloader.append)
+
+ msger.debug("Writing gummiboot config %s/hdd/boot/loader/entries/boot.conf" \
+ % cr_workdir)
+ cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
+ cfg.write(boot_conf)
+ cfg.close()
+
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), creates loader-specific config
+ """
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -d %s/EFI/BOOT" % hdddir
+ exec_cmd(install_cmd)
+
+ try:
+ if source_params['loader'] == 'grub-efi':
+ cls.do_configure_grubefi(hdddir, creator, cr_workdir)
+ elif source_params['loader'] == 'gummiboot':
+ cls.do_configure_gummiboot(hdddir, creator, cr_workdir)
+ else:
+ msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+ except KeyError:
+ msger.error("bootimg-efi requires a loader, none specified")
+
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for an EFI (grub) boot partition.
+ """
+ if not bootimg_dir:
+ bootimg_dir = get_bitbake_var("HDDDIR")
+ if not bootimg_dir:
+ msger.error("Couldn't find HDDDIR, exiting\n")
+ # just so the result notes display it
+ creator.set_bootimg_dir(bootimg_dir)
+
+ staging_kernel_dir = kernel_dir
+
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
+ (staging_kernel_dir, hdddir)
+ exec_cmd(install_cmd)
+
+ try:
+ if source_params['loader'] == 'grub-efi':
+ shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
+ "%s/grub.cfg" % cr_workdir)
+ cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
+ exec_cmd(cp_cmd, True)
+ shutil.move("%s/grub.cfg" % cr_workdir,
+ "%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
+ elif source_params['loader'] == 'gummiboot':
+ cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
+ exec_cmd(cp_cmd, True)
+ else:
+ msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+ except KeyError:
+ msger.error("bootimg-efi requires a loader, none specified")
+
+ du_cmd = "du -bks %s" % hdddir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+
+ extra_blocks = part.get_extra_block_count(blocks)
+
+ if extra_blocks < BOOTDD_EXTRA_SPACE:
+ extra_blocks = BOOTDD_EXTRA_SPACE
+
+ blocks += extra_blocks
+
+ msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
+ (extra_blocks, part.mountpoint, blocks))
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ blocks += (16 - (blocks % 16))
+
+ # dosfs image, created by mkdosfs
+ bootimg = "%s/boot.img" % cr_workdir
+
+ dosfs_cmd = "mkdosfs -n efi -C %s %d" % (bootimg, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % bootimg
+ exec_cmd(chmod_cmd)
+
+ du_cmd = "du -Lbks %s" % bootimg
+ out = exec_cmd(du_cmd)
+ bootimg_size = out.split()[0]
+
+ part.size = bootimg_size
+ part.source_file = bootimg
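+
+# A .wks line exercising this plugin might look like (illustrative):
+#
+#   part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024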
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
new file mode 100644
index 0000000..b76c121
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -0,0 +1,140 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-partition' source plugin class for
+# 'wic'. The plugin creates an image of boot partition, copying over
+# files listed in IMAGE_BOOT_FILES bitbake variable.
+#
+# AUTHORS
+# Maciej Borzecki <maciej.borzecki (at] open-rnd.pl>
+#
+
+import os
+import re
+
+from wic import msger
+from wic.pluginbase import SourcePlugin
+from wic.utils.oe.misc import exec_cmd, get_bitbake_var
+from glob import glob
+
+class BootimgPartitionPlugin(SourcePlugin):
+ """
+ Create an image of boot partition, copying over files
+ listed in IMAGE_BOOT_FILES bitbake variable.
+ """
+
+ name = 'bootimg-partition'
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. Do nothing.
+ """
+ pass
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(). Possibly prepare
+ configuration files of some sort.
+
+ """
+ pass
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, does the following:
+ - sets up a vfat partition
+ - copies all files listed in IMAGE_BOOT_FILES variable
+ """
+ hdddir = "%s/boot" % cr_workdir
+ install_cmd = "install -d %s" % hdddir
+ exec_cmd(install_cmd)
+
+ if not bootimg_dir:
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+
+ msger.debug('Bootimg dir: %s' % bootimg_dir)
+
+ boot_files = get_bitbake_var("IMAGE_BOOT_FILES")
+
+ if not boot_files:
+ msger.error('No boot files defined, IMAGE_BOOT_FILES unset')
+
+ msger.debug('Boot files: %s' % boot_files)
+
+ # list of tuples (src_name, dst_name)
+ deploy_files = []
+ for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files):
+ if ';' in src_entry:
+ dst_entry = tuple(src_entry.split(';'))
+ if not dst_entry[0] or not dst_entry[1]:
+ msger.error('Malformed boot file entry: %s' % (src_entry))
+ else:
+ dst_entry = (src_entry, src_entry)
+
+ msger.debug('Destination entry: %r' % (dst_entry,))
+ deploy_files.append(dst_entry)
+
+ for deploy_entry in deploy_files:
+ src, dst = deploy_entry
+ install_task = []
+ if '*' in src:
+ # by default install files under their basename
+ entry_name_fn = os.path.basename
+ if dst != src:
+ # unless a target name was given, then treat name
+ # as a directory and append a basename
+ entry_name_fn = lambda name: \
+ os.path.join(dst,
+ os.path.basename(name))
+
+ srcs = glob(os.path.join(bootimg_dir, src))
+
+ msger.debug('Globbed sources: %s' % (', '.join(srcs)))
+ for entry in srcs:
+ entry_dst_name = entry_name_fn(entry)
+ install_task.append((entry,
+ os.path.join(hdddir,
+ entry_dst_name)))
+ else:
+ install_task = [(os.path.join(bootimg_dir, src),
+ os.path.join(hdddir, dst))]
+
+ for task in install_task:
+ src_path, dst_path = task
+ msger.debug('Install %s as %s' % (os.path.basename(src_path),
+ dst_path))
+ install_cmd = "install -m 0644 -D %s %s" \
+ % (src_path, dst_path)
+ exec_cmd(install_cmd)
+
+ msger.debug('Prepare boot partition using rootfs in %s' % (hdddir))
+ part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
+ native_sysroot)
+
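+# Typical configuration for this plugin (illustrative file names): a .wks line
+# such as
+#
+#   part /boot --source bootimg-partition --ondisk sda --fstype=vfat --label boot --align 4
+#
+# combined with a bitbake setting like
+#
+#   IMAGE_BOOT_FILES = "u-boot.img zImage boot.scr;boot/boot.scr"
+#
+# where the optional ';dst' part installs the file under a different name or
+# subdirectory on the partition, as parsed above.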
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
new file mode 100644
index 0000000..5b719bf
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -0,0 +1,210 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-pcbios' source plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import os
+
+from wic.utils.errors import ImageError
+from wic import msger
+from wic.utils import runner
+from wic.utils.misc import get_custom_config
+from wic.pluginbase import SourcePlugin
+from wic.utils.oe.misc import exec_cmd, exec_native_cmd, \
+ get_bitbake_var, BOOTDD_EXTRA_SPACE
+
+class BootimgPcbiosPlugin(SourcePlugin):
+ """
+ Create MBR boot partition and install syslinux on it.
+ """
+
+ name = 'bootimg-pcbios'
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. In this case, we install the MBR.
+ """
+ mbrfile = "%s/syslinux/" % bootimg_dir
+ if creator.ptable_format == 'msdos':
+ mbrfile += "mbr.bin"
+ elif creator.ptable_format == 'gpt':
+ mbrfile += "gptmbr.bin"
+ else:
+ msger.error("Unsupported partition table: %s" % creator.ptable_format)
+
+ if not os.path.exists(mbrfile):
+ msger.error("Couldn't find %s. If using the -e option, do you "
+ "have the right MACHINE set in local.conf? If not, "
+ "is the bootimg_dir path correct?" % mbrfile)
+
+ full_path = creator._full_path(workdir, disk_name, "direct")
+ msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
+ % (disk_name, full_path, disk['min_size']))
+
+ rcode = runner.show(['dd', 'if=%s' % mbrfile,
+ 'of=%s' % full_path, 'conv=notrunc'])
+ if rcode != 0:
+ raise ImageError("Unable to set MBR to %s" % full_path)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), creates syslinux config
+ """
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -d %s" % hdddir
+ exec_cmd(install_cmd)
+
+ bootloader = creator.ks.bootloader
+
+ custom_cfg = None
+ if bootloader.configfile:
+ custom_cfg = get_custom_config(bootloader.configfile)
+ if custom_cfg:
+ # Use a custom configuration for grub
+ syslinux_conf = custom_cfg
+ msger.debug("Using custom configuration file "
+ "%s for syslinux.cfg" % bootloader.configfile)
+ else:
+ msger.error("configfile is specified but failed to "
+ "get it from %s." % bootloader.configfile)
+
+ if not custom_cfg:
+ # Create syslinux configuration using parameters from wks file
+ splash = os.path.join(cr_workdir, "/hdd/boot/splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ syslinux_conf = ""
+ syslinux_conf += "PROMPT 0\n"
+ syslinux_conf += "TIMEOUT " + str(bootloader.timeout) + "\n"
+ syslinux_conf += "\n"
+ syslinux_conf += "ALLOWOPTIONS 1\n"
+ syslinux_conf += "SERIAL 0 115200\n"
+ syslinux_conf += "\n"
+ if splashline:
+ syslinux_conf += "%s\n" % splashline
+ syslinux_conf += "DEFAULT boot\n"
+ syslinux_conf += "LABEL boot\n"
+
+ kernel = "/vmlinuz"
+ syslinux_conf += "KERNEL " + kernel + "\n"
+
+ syslinux_conf += "APPEND label=boot root=%s %s\n" % \
+ (creator.rootdev, bootloader.append)
+
+ msger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg" \
+ % cr_workdir)
+ cfg = open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w")
+ cfg.write(syslinux_conf)
+ cfg.close()
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for legacy bios boot partition.
+ """
+ def _has_syslinux(dirname):
+ if dirname:
+ syslinux = "%s/syslinux" % dirname
+ if os.path.exists(syslinux):
+ return True
+ return False
+
+ if not _has_syslinux(bootimg_dir):
+ bootimg_dir = get_bitbake_var("STAGING_DATADIR")
+ if not bootimg_dir:
+ msger.error("Couldn't find STAGING_DATADIR, exiting\n")
+ if not _has_syslinux(bootimg_dir):
+ msger.error("Please build syslinux first\n")
+ # just so the result notes display it
+ creator.set_bootimg_dir(bootimg_dir)
+
+ staging_kernel_dir = kernel_dir
+
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -m 0644 %s/bzImage %s/vmlinuz" \
+ % (staging_kernel_dir, hdddir)
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" \
+ % (bootimg_dir, hdddir)
+ exec_cmd(install_cmd)
+
+ du_cmd = "du -bks %s" % hdddir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+
+ extra_blocks = part.get_extra_block_count(blocks)
+
+ if extra_blocks < BOOTDD_EXTRA_SPACE:
+ extra_blocks = BOOTDD_EXTRA_SPACE
+
+ blocks += extra_blocks
+
+ msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
+ (extra_blocks, part.mountpoint, blocks))
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ blocks += (16 - (blocks % 16))
+
+ # dosfs image, created by mkdosfs
+ bootimg = "%s/boot.img" % cr_workdir
+
+ dosfs_cmd = "mkdosfs -n boot -S 512 -C %s %d" % (bootimg, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ syslinux_cmd = "syslinux %s" % bootimg
+ exec_native_cmd(syslinux_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % bootimg
+ exec_cmd(chmod_cmd)
+
+ du_cmd = "du -Lbks %s" % bootimg
+ out = exec_cmd(du_cmd)
+ bootimg_size = out.split()[0]
+
+ part.size = int(bootimg_size)
+ part.source_file = bootimg
+
+
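+# A .wks line exercising this plugin might look like (illustrative):
+#
+#   part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024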
diff --git a/scripts/lib/wic/plugins/source/fsimage.py b/scripts/lib/wic/plugins/source/fsimage.py
new file mode 100644
index 0000000..f894e89
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/fsimage.py
@@ -0,0 +1,73 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import os
+
+from wic import msger
+from wic.pluginbase import SourcePlugin
+from wic.utils.oe.misc import get_bitbake_var
+
+class FSImagePlugin(SourcePlugin):
+ """
+ Add an already existing filesystem image to the partition layout.
+ """
+
+ name = 'fsimage'
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. Do nothing.
+ """
+ pass
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(). Possibly prepare
+ configuration files of some sort.
+ """
+ pass
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ if not bootimg_dir:
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+
+ msger.debug('Bootimg dir: %s' % bootimg_dir)
+
+ if 'file' not in source_params:
+ msger.error("No file specified\n")
+ return
+
+ src = os.path.join(bootimg_dir, source_params['file'])
+
+
+ msger.debug('Preparing partition using image %s' % (src))
+ part.prepare_rootfs_from_fs_image(cr_workdir, src, "")
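+
+# Typical usage (illustrative file name): point the plugin at an existing
+# filesystem image under DEPLOY_DIR_IMAGE via the 'file' source parameter:
+#
+#   part / --source fsimage --sourceparams="file=core-image-minimal.ext4" --ondisk sda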
diff --git a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
new file mode 100644
index 0000000..ed59d85
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -0,0 +1,550 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
+#
+# AUTHORS
+# Mihaly Varga <mihaly.varga (at] ni.com>
+
+import os
+import re
+import shutil
+import glob
+
+from wic import msger
+from wic.pluginbase import SourcePlugin
+from wic.utils.misc import get_custom_config
+from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+
+class IsoImagePlugin(SourcePlugin):
+ """
+ Create a bootable ISO image
+
+ This plugin creates a hybrid, legacy and EFI bootable ISO image. The
+ generated image can be used on optical media as well as USB media.
+
+ Legacy boot uses syslinux and EFI boot uses grub or gummiboot (not
+ implemented yet) as bootloader. The plugin creates the directories required
+ by bootloaders and populates them by creating and configuring the
+ bootloader files.
+
+ Example kickstart file:
+ part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi, \\
+ image_name= IsoImage" --ondisk cd --label LIVECD --fstype=ext2
+ bootloader --timeout=10 --append=" "
+
+ In --sourceparams "loader" specifies the bootloader used for booting in EFI
+ mode, while "image_name" specifies the name of the generated image. In the
+ example above, wic creates an ISO image named IsoImage-cd.direct (default
+ extension added by the direct imager plugin) and a file named IsoImage-cd.iso
+ """
+
+ name = 'isoimage-isohybrid'
+
+ @classmethod
+ def do_configure_syslinux(cls, creator, cr_workdir):
+ """
+ Create loader-specific (syslinux) config
+ """
+ splash = os.path.join(cr_workdir, "ISO/boot/splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ bootloader = creator.ks.bootloader
+
+ syslinux_conf = ""
+ syslinux_conf += "PROMPT 0\n"
+ syslinux_conf += "TIMEOUT %s \n" % (bootloader.timeout or 10)
+ syslinux_conf += "\n"
+ syslinux_conf += "ALLOWOPTIONS 1\n"
+ syslinux_conf += "SERIAL 0 115200\n"
+ syslinux_conf += "\n"
+ if splashline:
+ syslinux_conf += "%s\n" % splashline
+ syslinux_conf += "DEFAULT boot\n"
+ syslinux_conf += "LABEL boot\n"
+
+ kernel = "/bzImage"
+ syslinux_conf += "KERNEL " + kernel + "\n"
+ syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
+ % bootloader.append
+
+ msger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg" \
+ % cr_workdir)
+ with open("%s/ISO/isolinux/isolinux.cfg" % cr_workdir, "w") as cfg:
+ cfg.write(syslinux_conf)
+
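+ # Illustration (not part of the original code): with no splash.jpg, the
+ # default timeout and an empty --append, the isolinux.cfg written above
+ # looks roughly like:
+ #   PROMPT 0
+ #   TIMEOUT 10
+ #   ALLOWOPTIONS 1
+ #   SERIAL 0 115200
+ #   DEFAULT boot
+ #   LABEL boot
+ #   KERNEL /bzImage
+ #   APPEND initrd=/initrd LABEL=boot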
+ @classmethod
+ def do_configure_grubefi(cls, part, creator, cr_workdir):
+ """
+ Create loader-specific (grub-efi) config
+ """
+ configfile = creator.ks.bootloader.configfile
+ if configfile:
+ grubefi_conf = get_custom_config(configfile)
+ if grubefi_conf:
+ msger.debug("Using custom configuration file "
+ "%s for grub.cfg" % configfile)
+ else:
+ msger.error("configfile is specified but failed to "
+ "get it from %s." % configfile)
+ else:
+ splash = os.path.join(cr_workdir, "EFI/boot/splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ bootloader = creator.ks.bootloader
+
+ grubefi_conf = ""
+ grubefi_conf += "serial --unit=0 --speed=115200 --word=8 "
+ grubefi_conf += "--parity=no --stop=1\n"
+ grubefi_conf += "default=boot\n"
+ grubefi_conf += "timeout=%s\n" % (bootloader.timeout or 10)
+ grubefi_conf += "\n"
+ grubefi_conf += "search --set=root --label %s " % part.label
+ grubefi_conf += "\n"
+ grubefi_conf += "menuentry 'boot'{\n"
+
+ kernel = "/bzImage"
+
+ grubefi_conf += "linux %s rootwait %s\n" \
+ % (kernel, bootloader.append)
+ grubefi_conf += "initrd /initrd \n"
+ grubefi_conf += "}\n"
+
+ if splashline:
+ grubefi_conf += "%s\n" % splashline
+
+ msger.debug("Writing grubefi config %s/EFI/BOOT/grub.cfg" \
+ % cr_workdir)
+ with open("%s/EFI/BOOT/grub.cfg" % cr_workdir, "w") as cfg:
+ cfg.write(grubefi_conf)
+
+ @staticmethod
+ def _build_initramfs_path(rootfs_dir, cr_workdir):
+ """
+ Create path for initramfs image
+ """
+
+ initrd = get_bitbake_var("INITRD")
+ if not initrd:
+ initrd_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not initrd_dir:
+ msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting.\n")
+
+ image_name = get_bitbake_var("IMAGE_BASENAME")
+ if not image_name:
+ msger.error("Couldn't find IMAGE_BASENAME, exiting.\n")
+
+ image_type = get_bitbake_var("INITRAMFS_FSTYPES")
+ if not image_type:
+ msger.error("Couldn't find INITRAMFS_FSTYPES, exiting.\n")
+
+ machine_arch = get_bitbake_var("MACHINE_ARCH")
+ if not machine_arch:
+ msger.error("Couldn't find MACHINE_ARCH, exiting.\n")
+
+ initrd = glob.glob('%s/%s*%s.%s' % (initrd_dir, image_name, machine_arch, image_type))[0]
+
+ if not os.path.exists(initrd):
+ # Create initrd from rootfs directory
+ initrd = "%s/initrd.cpio.gz" % cr_workdir
+ initrd_dir = "%s/INITRD" % cr_workdir
+ shutil.copytree("%s" % rootfs_dir, \
+ "%s" % initrd_dir, symlinks=True)
+
+ if os.path.isfile("%s/init" % rootfs_dir):
+ shutil.copy2("%s/init" % rootfs_dir, "%s/init" % initrd_dir)
+ elif os.path.lexists("%s/init" % rootfs_dir):
+ os.symlink(os.readlink("%s/init" % rootfs_dir), \
+ "%s/init" % initrd_dir)
+ elif os.path.isfile("%s/sbin/init" % rootfs_dir):
+ shutil.copy2("%s/sbin/init" % rootfs_dir, \
+ "%s" % initrd_dir)
+ elif os.path.lexists("%s/sbin/init" % rootfs_dir):
+ os.symlink(os.readlink("%s/sbin/init" % rootfs_dir), \
+ "%s/init" % initrd_dir)
+ else:
+ msger.error("Couldn't find or build initrd, exiting.\n")
+
+ exec_cmd("cd %s && find . | cpio -o -H newc -R +0:+0 >./initrd.cpio " \
+ % initrd_dir, as_shell=True)
+ exec_cmd("gzip -f -9 -c %s/initrd.cpio > %s" \
+ % (initrd_dir, initrd), as_shell=True)
+ shutil.rmtree(initrd_dir)
+
+ return initrd
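+ # Illustration with hypothetical variable values: if IMAGE_BASENAME is
+ # "core-image-minimal", MACHINE_ARCH is "qemux86" and INITRAMFS_FSTYPES is
+ # "cpio.gz", the glob above would match a deployed file such as
+ # core-image-minimal-initramfs-qemux86.cpio.gz in DEPLOY_DIR_IMAGE.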
+
+ @classmethod
+ def do_stage_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Special content staging called before do_prepare_partition().
+ It checks if all necessary tools are available and, if not,
+ tries to install them.
+ """
+ # Make sure parted is available in native sysroot
+ if not os.path.isfile("%s/usr/sbin/parted" % native_sysroot):
+ msger.info("Building parted-native...\n")
+ exec_cmd("bitbake parted-native")
+
+ # Make sure mkfs.ext2/3/4 is available in native sysroot
+ if not os.path.isfile("%s/sbin/mkfs.ext2" % native_sysroot):
+ msger.info("Building e2fsprogs-native...\n")
+ exec_cmd("bitbake e2fsprogs-native")
+
+ # Make sure syslinux is available in sysroot and in native sysroot
+ syslinux_dir = get_bitbake_var("STAGING_DATADIR")
+ if not syslinux_dir:
+ msger.error("Couldn't find STAGING_DATADIR, exiting.\n")
+ if not os.path.exists("%s/syslinux" % syslinux_dir):
+ msger.info("Building syslinux...\n")
+ exec_cmd("bitbake syslinux")
+ if not os.path.exists("%s/syslinux" % syslinux_dir):
+ msger.error("Please build syslinux first\n")
+
+ # Make sure syslinux is available in native sysroot
+ if not os.path.exists("%s/usr/bin/syslinux" % native_sysroot):
+ msger.info("Building syslinux-native...\n")
+ exec_cmd("bitbake syslinux-native")
+
+ # Make sure mkisofs is available in native sysroot
+ if not os.path.isfile("%s/usr/bin/mkisofs" % native_sysroot):
+ msger.info("Building cdrtools-native...\n")
+ exec_cmd("bitbake cdrtools-native")
+
+ # Make sure mkfs.vfat is available in native sysroot
+ if not os.path.isfile("%s/sbin/mkfs.vfat" % native_sysroot):
+ msger.info("Building dosfstools-native...\n")
+ exec_cmd("bitbake dosfstools-native")
+
+ # Make sure mtools is available in native sysroot
+ if not os.path.isfile("%s/usr/bin/mcopy" % native_sysroot):
+ msger.info("Building mtools-native...\n")
+ exec_cmd("bitbake mtools-native")
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), creates loader-specific config
+ """
+ isodir = "%s/ISO/" % cr_workdir
+
+ if os.path.exists(cr_workdir):
+ shutil.rmtree(cr_workdir)
+
+ install_cmd = "install -d %s " % isodir
+ exec_cmd(install_cmd)
+
+ # Overwrite the name of the created image
+ msger.debug("%s" % source_params)
+ if 'image_name' in source_params and \
+ source_params['image_name'].strip():
+ creator.name = source_params['image_name'].strip()
+ msger.debug("The name of the image is: %s" % creator.name)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for a bootable ISO image.
+ """
+
+ isodir = "%s/ISO" % cr_workdir
+
+ if part.rootfs_dir is None:
+ if not 'ROOTFS_DIR' in rootfs_dir:
+ msger.error("Couldn't find --rootfs-dir, exiting.\n")
+ rootfs_dir = rootfs_dir['ROOTFS_DIR']
+ else:
+ if part.rootfs_dir in rootfs_dir:
+ rootfs_dir = rootfs_dir[part.rootfs_dir]
+ elif part.rootfs_dir:
+ rootfs_dir = part.rootfs_dir
+ else:
+ msg = "Couldn't find --rootfs-dir=%s connection "
+ msg += "or it is not a valid path, exiting.\n"
+ msger.error(msg % part.rootfs_dir)
+
+ if not os.path.isdir(rootfs_dir):
+ rootfs_dir = get_bitbake_var("IMAGE_ROOTFS")
+ if not os.path.isdir(rootfs_dir):
+ msger.error("Couldn't find IMAGE_ROOTFS, exiting.\n")
+
+ part.rootfs_dir = rootfs_dir
+
+ # Prepare rootfs.img
+ hdd_dir = get_bitbake_var("HDDDIR")
+ img_iso_dir = get_bitbake_var("ISODIR")
+
+ rootfs_img = "%s/rootfs.img" % hdd_dir
+ if not os.path.isfile(rootfs_img):
+ rootfs_img = "%s/rootfs.img" % img_iso_dir
+ if not os.path.isfile(rootfs_img):
+ # check if rootfs.img is in deploydir
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ image_name = get_bitbake_var("IMAGE_LINK_NAME")
+ rootfs_img = "%s/%s.%s" \
+ % (deploy_dir, image_name, part.fstype)
+
+ if not os.path.isfile(rootfs_img):
+ # create image file with type specified by --fstype
+ # which contains rootfs
+ du_cmd = "du -bks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ part.size = int(out.split()[0])
+ part.extra_space = 0
+ part.overhead_factor = 1.2
+ part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir, \
+ native_sysroot)
+ rootfs_img = part.source_file
+
+ install_cmd = "install -m 0644 %s %s/rootfs.img" \
+ % (rootfs_img, isodir)
+ exec_cmd(install_cmd)
+
+ # Remove the temporary file created by part.prepare_rootfs()
+ if os.path.isfile(part.source_file):
+ os.remove(part.source_file)
+
+ # Prepare initial ramdisk
+ initrd = "%s/initrd" % hdd_dir
+ if not os.path.isfile(initrd):
+ initrd = "%s/initrd" % img_iso_dir
+ if not os.path.isfile(initrd):
+ initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
+
+ install_cmd = "install -m 0644 %s %s/initrd" \
+ % (initrd, isodir)
+ exec_cmd(install_cmd)
+
+ # Remove the temporary file created by _build_initramfs_path function
+ if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
+ os.remove("%s/initrd.cpio.gz" % cr_workdir)
+
+ # Install bzImage
+ install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
+ (kernel_dir, isodir)
+ exec_cmd(install_cmd)
+
+ # Create bootloader for EFI boot
+ try:
+ if source_params['loader'] == 'grub-efi':
+ # Builds grub.cfg if ISODIR didn't exist or
+ # didn't contain grub.cfg
+ bootimg_dir = img_iso_dir
+ if not os.path.exists("%s/EFI/BOOT" % bootimg_dir):
+ bootimg_dir = "%s/bootimg" % cr_workdir
+ if os.path.exists(bootimg_dir):
+ shutil.rmtree(bootimg_dir)
+ install_cmd = "install -d %s/EFI/BOOT" % bootimg_dir
+ exec_cmd(install_cmd)
+
+ if not os.path.isfile("%s/EFI/BOOT/boot.cfg" % bootimg_dir):
+ cls.do_configure_grubefi(part, creator, bootimg_dir)
+
+ # Builds bootx64.efi/bootia32.efi if ISODIR didn't exist or
+ # didn't contain it
+ target_arch = get_bitbake_var("TARGET_SYS")
+ if not target_arch:
+ msger.error("Coludn't find target architecture\n")
+
+ if re.match("x86_64", target_arch):
+ grub_target = 'x86_64-efi'
+ grub_image = "bootx64.efi"
+ elif re.match('i.86', target_arch):
+ grub_target = 'i386-efi'
+ grub_image = "bootia32.efi"
+ else:
+ msger.error("grub-efi is incompatible with target %s\n" \
+ % target_arch)
+
+ if not os.path.isfile("%s/EFI/BOOT/%s" \
+ % (bootimg_dir, grub_image)):
+ grub_path = get_bitbake_var("STAGING_LIBDIR")
+ if not grub_path:
+ msger.error("Couldn't find STAGING_LIBDIR, exiting.\n")
+
+ grub_core = "%s/grub/%s" % (grub_path, grub_target)
+ if not os.path.exists(grub_core):
+ msger.info("Building grub-efi...\n")
+ exec_cmd("bitbake grub-efi")
+ if not os.path.exists(grub_core):
+ msger.error("Please build grub-efi first\n")
+
+ grub_cmd = "grub-mkimage -p '/EFI/BOOT' "
+ grub_cmd += "-d %s " % grub_core
+ grub_cmd += "-O %s -o %s/EFI/BOOT/%s " \
+ % (grub_target, bootimg_dir, grub_image)
+ grub_cmd += "part_gpt part_msdos ntfs ntfscomp fat ext2 "
+ grub_cmd += "normal chain boot configfile linux multiboot "
+ grub_cmd += "search efi_gop efi_uga font gfxterm gfxmenu "
+ grub_cmd += "terminal minicmd test iorw loadenv echo help "
+ grub_cmd += "reboot serial terminfo iso9660 loopback tar "
+ grub_cmd += "memdisk ls search_fs_uuid udf btrfs xfs lvm "
+ grub_cmd += "reiserfs ata "
+ exec_native_cmd(grub_cmd, native_sysroot)
+
+ else:
+ # TODO: insert gummiboot stuff
+ msger.error("unrecognized bootimg-efi loader: %s" \
+ % source_params['loader'])
+ except KeyError:
+ msger.error("bootimg-efi requires a loader, none specified")
+
+ if os.path.exists("%s/EFI/BOOT" % isodir):
+ shutil.rmtree("%s/EFI/BOOT" % isodir)
+
+ shutil.copytree(bootimg_dir+"/EFI/BOOT", isodir+"/EFI/BOOT")
+
+ # If exists, remove cr_workdir/bootimg temporary folder
+ if os.path.exists("%s/bootimg" % cr_workdir):
+ shutil.rmtree("%s/bootimg" % cr_workdir)
+
+ # Create efi.img that contains bootloader files for EFI booting
+ # if ISODIR didn't exist or didn't contain it
+ if os.path.isfile("%s/efi.img" % img_iso_dir):
+ install_cmd = "install -m 0644 %s/efi.img %s/efi.img" % \
+ (img_iso_dir, isodir)
+ exec_cmd(install_cmd)
+ else:
+ du_cmd = "du -bks %s/EFI" % isodir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+ # Add some extra space for file system overhead
+ blocks += 100
+ msg = "Added 100 extra blocks to %s to get to %d total blocks" \
+ % (part.mountpoint, blocks)
+ msger.debug(msg)
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ blocks += (16 - (blocks % 16))
+
+ # dosfs image for EFI boot
+ bootimg = "%s/efi.img" % isodir
+
+ dosfs_cmd = 'mkfs.vfat -n "EFIimg" -S 512 -C %s %d' \
+ % (bootimg, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mmd_cmd = "mmd -i %s ::/EFI" % bootimg
+ exec_native_cmd(mmd_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/EFI/* ::/EFI/" \
+ % (bootimg, isodir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % bootimg
+ exec_cmd(chmod_cmd)
+
+ # Prepare files for legacy boot
+ syslinux_dir = get_bitbake_var("STAGING_DATADIR")
+ if not syslinux_dir:
+ msger.error("Couldn't find STAGING_DATADIR, exiting.\n")
+
+ if os.path.exists("%s/isolinux" % isodir):
+ shutil.rmtree("%s/isolinux" % isodir)
+
+ install_cmd = "install -d %s/isolinux" % isodir
+ exec_cmd(install_cmd)
+
+ cls.do_configure_syslinux(creator, cr_workdir)
+
+ install_cmd = "install -m 444 %s/syslinux/ldlinux.sys " % syslinux_dir
+ install_cmd += "%s/isolinux/ldlinux.sys" % isodir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 444 %s/syslinux/isohdpfx.bin " % syslinux_dir
+ install_cmd += "%s/isolinux/isohdpfx.bin" % isodir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 644 %s/syslinux/isolinux.bin " % syslinux_dir
+ install_cmd += "%s/isolinux/isolinux.bin" % isodir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 644 %s/syslinux/ldlinux.c32 " % syslinux_dir
+ install_cmd += "%s/isolinux/ldlinux.c32" % isodir
+ exec_cmd(install_cmd)
+
+ # Create the ISO image
+ iso_img = "%s/tempiso_img.iso" % cr_workdir
+ iso_bootimg = "isolinux/isolinux.bin"
+ iso_bootcat = "isolinux/boot.cat"
+ efi_img = "efi.img"
+
+ mkisofs_cmd = "mkisofs -V %s " % part.label
+ mkisofs_cmd += "-o %s -U " % iso_img
+ mkisofs_cmd += "-J -joliet-long -r -iso-level 2 -b %s " % iso_bootimg
+ mkisofs_cmd += "-c %s -no-emul-boot -boot-load-size 4 " % iso_bootcat
+ mkisofs_cmd += "-boot-info-table -eltorito-alt-boot "
+ mkisofs_cmd += "-eltorito-platform 0xEF -eltorito-boot %s " % efi_img
+ mkisofs_cmd += "-no-emul-boot %s " % isodir
+
+ msger.debug("running command: %s" % mkisofs_cmd)
+ exec_native_cmd(mkisofs_cmd, native_sysroot)
+
+ shutil.rmtree(isodir)
+
+ du_cmd = "du -Lbks %s" % iso_img
+ out = exec_cmd(du_cmd)
+ isoimg_size = int(out.split()[0])
+
+ part.size = isoimg_size
+ part.source_file = iso_img
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. In this case, we insert/modify the MBR using isohybrid
+ utility for booting via BIOS from disk storage devices.
+ """
+
+ full_path = creator._full_path(workdir, disk_name, "direct")
+ iso_img = "%s.p1" % full_path
+ full_path_iso = creator._full_path(workdir, disk_name, "iso")
+
+ isohybrid_cmd = "isohybrid -u %s" % iso_img
+ msger.debug("running command: %s" % \
+ isohybrid_cmd)
+ exec_native_cmd(isohybrid_cmd, native_sysroot)
+
+ # Replace the image created by direct plugin with the one created by
+ # mkisofs command. This is necessary because the iso image created by
+ # mkisofs has a very specific MBR in the system area of the ISO image,
+ # and the direct plugin adds and configures another MBR.
+ msger.debug("Replacing the image created by direct plugin\n")
+ os.remove(full_path)
+ shutil.copy2(iso_img, full_path_iso)
+ shutil.copy2(full_path_iso, full_path)
+
+ # Remove temporary ISO file
+ os.remove(iso_img)
diff --git a/scripts/lib/wic/plugins/source/rawcopy.py b/scripts/lib/wic/plugins/source/rawcopy.py
new file mode 100644
index 0000000..7ce0cc4
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/rawcopy.py
@@ -0,0 +1,88 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import os
+
+from wic import msger
+from wic.pluginbase import SourcePlugin
+from wic.utils.oe.misc import exec_cmd, get_bitbake_var
+
+class RawCopyPlugin(SourcePlugin):
+ """
+ Populate partition content from raw image file.
+ """
+
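+ # Hypothetical .wks usage (illustration only, filenames are examples):
+ #   part /backup --source rawcopy --sourceparams="file=core-image-minimal-qemux86.ext4" --ondisk sda
+ #   part /boot --source rawcopy --sourceparams="file=u-boot.img,skip=512" --ondisk sda
+ # 'file' is looked up in DEPLOY_DIR_IMAGE (or the supplied bootimg dir) and
+ # 'skip' drops that many bytes from the start of the source file.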
+ name = 'rawcopy'
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. Do nothing.
+ """
+ pass
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(). Possibly prepare
+ configuration files of some sort.
+ """
+ pass
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ if not bootimg_dir:
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+
+ msger.debug('Bootimg dir: %s' % bootimg_dir)
+
+ if 'file' not in source_params:
+ msger.error("No file specified\n")
+ return
+
+ src = os.path.join(bootimg_dir, source_params['file'])
+ dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
+
+ if 'skip' in source_params:
+ dd_cmd = "dd if=%s of=%s ibs=%s skip=1 conv=notrunc" % \
+ (src, dst, source_params['skip'])
+ else:
+ dd_cmd = "cp %s %s" % (src, dst)
+ exec_cmd(dd_cmd)
+
+ # get the size in the right units for kickstart (kB)
+ du_cmd = "du -Lbks %s" % dst
+ out = exec_cmd(du_cmd)
+ filesize = out.split()[0]
+
+ if int(filesize) > int(part.size):
+ part.size = filesize
+
+ part.source_file = dst
+
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
new file mode 100644
index 0000000..425da8b
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -0,0 +1,83 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'rootfs' source plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+# Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com>
+#
+
+import os
+
+from wic import msger
+from wic.pluginbase import SourcePlugin
+from wic.utils.oe.misc import get_bitbake_var
+
+class RootfsPlugin(SourcePlugin):
+ """
+ Populate partition content from a rootfs directory.
+ """
+
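+ # Typical .wks usage (illustration only):
+ #   part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024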
+ name = 'rootfs'
+
+ @staticmethod
+ def __get_rootfs_dir(rootfs_dir):
+ if os.path.isdir(rootfs_dir):
+ return rootfs_dir
+
+ image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
+ if not os.path.isdir(image_rootfs_dir):
+ msg = "No valid artifact IMAGE_ROOTFS from image named"
+ msg += " %s has been found at %s, exiting.\n" % \
+ (rootfs_dir, image_rootfs_dir)
+ msger.error(msg)
+
+ return image_rootfs_dir
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ krootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for legacy bios boot partition.
+ """
+ if part.rootfs_dir is None:
+ if not 'ROOTFS_DIR' in krootfs_dir:
+ msg = "Couldn't find --rootfs-dir, exiting"
+ msger.error(msg)
+ rootfs_dir = krootfs_dir['ROOTFS_DIR']
+ else:
+ if part.rootfs_dir in krootfs_dir:
+ rootfs_dir = krootfs_dir[part.rootfs_dir]
+ elif part.rootfs_dir:
+ rootfs_dir = part.rootfs_dir
+ else:
+ msg = "Couldn't find --rootfs-dir=%s connection"
+ msg += " or it is not a valid path, exiting"
+ msger.error(msg % part.rootfs_dir)
+
+ real_rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+
+ part.rootfs_dir = real_rootfs_dir
+ part.prepare_rootfs(cr_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
+
diff --git a/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py b/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py
new file mode 100644
index 0000000..3d60e6f
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py
@@ -0,0 +1,177 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can distribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# AUTHOR
+# Adrian Freihofer <adrian.freihofer (at] neratec.com>
+#
+
+import os
+from wic import msger
+from wic.utils import syslinux
+from wic.utils import runner
+from wic.utils.oe import misc
+from wic.utils.errors import ImageError
+from wic.pluginbase import SourcePlugin
+
+
+# pylint: disable=no-init
+class RootfsPlugin(SourcePlugin):
+ """
+ Create root partition and install syslinux bootloader
+
+ This plugin creates a disk image containing a bootable root partition with
+ syslinux installed. The filesystem is ext2/3/4, no extra boot partition is
+ required.
+
+ Example kickstart file:
+ part / --source rootfs-pcbios-ext --ondisk sda --fstype=ext4 --label rootfs --align 1024
+ bootloader --source rootfs-pcbios-ext --timeout=0 --append="rootwait rootfstype=ext4"
+
+ The first line generates a root file system including a syslinux.cfg file.
+ The "--source rootfs-pcbios-ext" in the second line triggers the installation
+ of ldlinux.sys into the image.
+ """
+
+ name = 'rootfs-pcbios-ext'
+
+ @staticmethod
+ def _get_rootfs_dir(rootfs_dir):
+ """
+ Find rootfs pseudo dir
+
+ If rootfs_dir is a directory consider it as rootfs directory.
+ Otherwise ask bitbake about the IMAGE_ROOTFS directory.
+ """
+ if os.path.isdir(rootfs_dir):
+ return rootfs_dir
+
+ image_rootfs_dir = misc.get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
+ if not os.path.isdir(image_rootfs_dir):
+ msg = "No valid artifact IMAGE_ROOTFS from image named"
+ msg += " %s has been found at %s, exiting.\n" % \
+ (rootfs_dir, image_rootfs_dir)
+ msger.error(msg)
+
+ return image_rootfs_dir
+
+ # pylint: disable=unused-argument
+ @classmethod
+ def do_configure_partition(cls, part, source_params, image_creator,
+ image_creator_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, native_sysroot):
+ """
+ Creates syslinux config in rootfs directory
+
+ Called before do_prepare_partition()
+ """
+ bootloader = image_creator.ks.bootloader
+
+ syslinux_conf = ""
+ syslinux_conf += "PROMPT 0\n"
+
+ syslinux_conf += "TIMEOUT " + str(bootloader.timeout) + "\n"
+ syslinux_conf += "ALLOWOPTIONS 1\n"
+
+ # Derive the SERIAL... line from the kernel boot parameters
+ # (the command line supplied via bootloader --append)
+ syslinux_conf += syslinux.serial_console_form_kargs(bootloader.append) + "\n"
+
+ syslinux_conf += "DEFAULT linux\n"
+ syslinux_conf += "LABEL linux\n"
+ syslinux_conf += " KERNEL /boot/bzImage\n"
+
+ syslinux_conf += " APPEND label=boot root=%s %s\n" % \
+ (image_creator.rootdev, bootloader.append)
+
+ syslinux_cfg = os.path.join(image_creator.rootfs_dir['ROOTFS_DIR'], "boot", "syslinux.cfg")
+ msger.debug("Writing syslinux config %s" % syslinux_cfg)
+ with open(syslinux_cfg, "w") as cfg:
+ cfg.write(syslinux_conf)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, image_creator,
+ image_creator_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, krootfs_dir, native_sysroot):
+ """
+ Creates partition out of rootfs directory
+
+ Prepare content for a rootfs partition i.e. create a partition
+ and fill it from a /rootfs dir.
+ Install syslinux bootloader into root partition image file
+ """
+ def is_exe(exepath):
+ """Verify exepath is an executable file"""
+ return os.path.isfile(exepath) and os.access(exepath, os.X_OK)
+
+ # Make sure syslinux-nomtools is available in native sysroot or fail
+ native_syslinux_nomtools = os.path.join(native_sysroot, "usr/bin/syslinux-nomtools")
+ if not is_exe(native_syslinux_nomtools):
+ msger.info("building syslinux-native...")
+ misc.exec_cmd("bitbake syslinux-native")
+ if not is_exe(native_syslinux_nomtools):
+ msger.error("Couldn't find syslinux-nomtools (%s), exiting\n" %
+ native_syslinux_nomtools)
+
+ if part.rootfs is None:
+ if 'ROOTFS_DIR' not in krootfs_dir:
+ msger.error("Couldn't find --rootfs-dir, exiting")
+ rootfs_dir = krootfs_dir['ROOTFS_DIR']
+ else:
+ if part.rootfs in krootfs_dir:
+ rootfs_dir = krootfs_dir[part.rootfs]
+ elif part.rootfs:
+ rootfs_dir = part.rootfs
+ else:
+ msg = "Couldn't find --rootfs-dir=%s connection"
+ msg += " or it is not a valid path, exiting"
+ msger.error(msg % part.rootfs)
+
+ real_rootfs_dir = cls._get_rootfs_dir(rootfs_dir)
+
+ part.rootfs_dir = real_rootfs_dir
+ part.prepare_rootfs(image_creator_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
+
+ # install syslinux into rootfs partition
+ syslinux_cmd = "syslinux-nomtools -d /boot -i %s" % part.source_file
+ misc.exec_native_cmd(syslinux_cmd, native_sysroot)
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, image_creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Assemble partitions to disk image
+
+ Called after all partitions have been prepared and assembled into a
+ disk image. In this case, we install the MBR.
+ """
+ mbrfile = os.path.join(native_sysroot, "usr/share/syslinux/")
+ if image_creator.ptable_format == 'msdos':
+ mbrfile += "mbr.bin"
+ elif image_creator.ptable_format == 'gpt':
+ mbrfile += "gptmbr.bin"
+ else:
+ msger.error("Unsupported partition table: %s" % \
+ image_creator.ptable_format)
+
+ if not os.path.exists(mbrfile):
+ msger.error("Couldn't find %s. Has syslinux-native been baked?" % mbrfile)
+
+ full_path = disk['disk'].device
+ msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
+ % (disk_name, full_path, disk['min_size']))
+
+ ret_code = runner.show(['dd', 'if=%s' % mbrfile, 'of=%s' % full_path, 'conv=notrunc'])
+ if ret_code != 0:
+ raise ImageError("Unable to set MBR to %s" % full_path)
diff --git a/scripts/lib/wic/test b/scripts/lib/wic/test
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/scripts/lib/wic/test
@@ -0,0 +1 @@
+test
diff --git a/scripts/lib/wic/utils/__init__.py b/scripts/lib/wic/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/lib/wic/utils/__init__.py
diff --git a/scripts/lib/wic/utils/errors.py b/scripts/lib/wic/utils/errors.py
new file mode 100644
index 0000000..d1b514d
--- /dev/null
+++ b/scripts/lib/wic/utils/errors.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2007 Red Hat, Inc.
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+class WicError(Exception):
+ pass
+
+class CreatorError(WicError):
+ pass
+
+class Usage(WicError):
+ pass
+
+class ImageError(WicError):
+ pass
diff --git a/scripts/lib/wic/utils/fs_related.py b/scripts/lib/wic/utils/fs_related.py
new file mode 100644
index 0000000..2e74461
--- /dev/null
+++ b/scripts/lib/wic/utils/fs_related.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2007, Red Hat, Inc.
+# Copyright (c) 2009, 2010, 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+from __future__ import with_statement
+import os
+import errno
+
+from wic.utils.oe.misc import exec_cmd
+
+def makedirs(dirname):
+ """A version of os.makedirs() that doesn't throw an
+ exception if the leaf directory already exists.
+ """
+ try:
+ os.makedirs(dirname)
+ except OSError, err:
+ if err.errno != errno.EEXIST:
+ raise
+
+class Disk:
+ """
+ Generic base object for a disk.
+ """
+ def __init__(self, size, device=None):
+ self._device = device
+ self._size = size
+
+ def create(self):
+ pass
+
+ def cleanup(self):
+ pass
+
+ def get_device(self):
+ return self._device
+ def set_device(self, path):
+ self._device = path
+ device = property(get_device, set_device)
+
+ def get_size(self):
+ return self._size
+ size = property(get_size)
+
+
+class DiskImage(Disk):
+ """
+ A Disk backed by a file.
+ """
+ def __init__(self, image_file, size):
+ Disk.__init__(self, size)
+ self.image_file = image_file
+
+ def exists(self):
+ return os.path.exists(self.image_file)
+
+ def create(self):
+ if self.device is not None:
+ return
+
+ blocks = self.size / 1024
+ if self.size - blocks * 1024:
+ blocks += 1
+
+ # create disk image
+ dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=1" % \
+ (self.image_file, blocks)
+ exec_cmd(dd_cmd)
+
+ self.device = self.image_file
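+ # Minimal usage sketch (values are illustrative only):
+ #   disk = DiskImage("/tmp/wic-test.direct", 10 * 1024 * 1024)
+ #   disk.create()       # dd allocates the backing file
+ #   print disk.device   # "/tmp/wic-test.direct"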
diff --git a/scripts/lib/wic/utils/misc.py b/scripts/lib/wic/utils/misc.py
new file mode 100644
index 0000000..1415ae9
--- /dev/null
+++ b/scripts/lib/wic/utils/misc.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2010, 2011 Intel Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import time
+import wic.engine
+
+def build_name(kscfg, release=None, prefix=None, suffix=None):
+ """Construct and return an image name string.
+
+ This is a utility function to help create sensible name and fslabel
+ strings. The name is constructed using the sans-prefix-and-extension
+ kickstart filename and the supplied prefix and suffix.
+
+ kscfg -- a path to a kickstart file
+ release -- a replacement to suffix for image release
+ prefix -- a prefix to prepend to the name; defaults to None, which causes
+ no prefix to be used
+ suffix -- a suffix to append to the name; defaults to None, which causes
+ a YYYYMMDDHHMM suffix to be used
+
+ Note, if maxlen is less than the len(suffix), you get to keep both pieces.
+
+ """
+ name = os.path.basename(kscfg)
+ idx = name.rfind('.')
+ if idx >= 0:
+ name = name[:idx]
+
+ if release is not None:
+ suffix = ""
+ if prefix is None:
+ prefix = ""
+ if suffix is None:
+ suffix = time.strftime("%Y%m%d%H%M")
+
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+
+ prefix = "%s-" % prefix if prefix else ""
+ suffix = "-%s" % suffix if suffix else ""
+
+ ret = prefix + name + suffix
+
+ return ret
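+ # For example (illustrative): build_name("/path/to/directdisk.wks",
+ # prefix="wic", suffix="201601011200") returns "wic-directdisk-201601011200";
+ # with suffix=None a YYYYMMDDHHMM timestamp is used instead.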
+
+def find_canned(scripts_path, file_name):
+ """
+ Find a file either by its path or by name in the canned files dir.
+
+ Return None if not found
+ """
+ if os.path.exists(file_name):
+ return file_name
+
+ layers_canned_wks_dir = wic.engine.build_canned_image_list(scripts_path)
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname == file_name:
+ fullpath = os.path.join(canned_wks_dir, fname)
+ return fullpath
+
+def get_custom_config(boot_file):
+ """
+ Get the custom configuration to be used for the bootloader.
+
+ Return None if the file can't be found.
+ """
+ scripts_path = os.path.abspath(os.path.dirname(__file__))
+ # Get the scripts path of poky
+ for x in range(0, 3):
+ scripts_path = os.path.dirname(scripts_path)
+
+ cfg_file = find_canned(scripts_path, boot_file)
+ if cfg_file:
+ with open(cfg_file, "r") as f:
+ config = f.read()
+ return config
+
+ return None
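+ # Illustrative use: for a kickstart line like
+ #   bootloader --configfile="custom-grub.cfg" ...
+ # a source plugin can call get_custom_config("custom-grub.cfg") and gets the
+ # file contents back, or None if no such canned file exists.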
diff --git a/scripts/lib/wic/utils/oe/__init__.py b/scripts/lib/wic/utils/oe/__init__.py
new file mode 100644
index 0000000..0a81575
--- /dev/null
+++ b/scripts/lib/wic/utils/oe/__init__.py
@@ -0,0 +1,22 @@
+#
+# OpenEmbedded wic utils library
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
diff --git a/scripts/lib/wic/utils/oe/misc.py b/scripts/lib/wic/utils/oe/misc.py
new file mode 100644
index 0000000..81239ac
--- /dev/null
+++ b/scripts/lib/wic/utils/oe/misc.py
@@ -0,0 +1,250 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module provides a place to collect various wic-related utils
+# for the OpenEmbedded Image Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+"""Miscellaneous functions."""
+
+import os
+from collections import defaultdict
+
+from wic import msger
+from wic.utils import runner
+
+# executable -> recipe pairs for exec_native_cmd
+NATIVE_RECIPES = {"mcopy": "mtools",
+ "mkdosfs": "dosfstools",
+ "mkfs.btrfs": "btrfs-tools",
+ "mkfs.ext2": "e2fsprogs",
+ "mkfs.ext3": "e2fsprogs",
+ "mkfs.ext4": "e2fsprogs",
+ "mkfs.vfat": "dosfstools",
+ "mksquashfs": "squashfs-tools",
+ "mkswap": "util-linux",
+ "parted": "parted",
+ "sgdisk": "gptfdisk",
+ "syslinux": "syslinux"
+ }
+
+def _exec_cmd(cmd_and_args, as_shell=False, catch=3):
+ """
+ Execute command, catching stderr, stdout
+
+ Need to execute as_shell if the command uses wildcards
+ """
+ msger.debug("_exec_cmd: %s" % cmd_and_args)
+ args = cmd_and_args.split()
+ msger.debug(args)
+
+ if as_shell:
+ ret, out = runner.runtool(cmd_and_args, catch)
+ else:
+ ret, out = runner.runtool(args, catch)
+ out = out.strip()
+ msger.debug("_exec_cmd: output for %s (rc = %d): %s" % \
+ (cmd_and_args, ret, out))
+
+ return (ret, out)
+
+
+def exec_cmd(cmd_and_args, as_shell=False, catch=3):
+ """
+ Execute command, catching stderr, stdout
+
+ Exits if rc non-zero
+ """
+ ret, out = _exec_cmd(cmd_and_args, as_shell, catch)
+
+ if ret != 0:
+ msger.error("exec_cmd: %s returned '%s' instead of 0" % \
+ (cmd_and_args, ret))
+
+ return out
+
+def cmd_in_path(cmd, path):
+ import scriptpath
+
+ scriptpath.add_bitbake_lib_path()
+ # bb becomes importable only after the bitbake lib path has been added
+ import bb
+
+ return bb.utils.which(path, cmd) != ""
+
+def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
+ """
+ Execute native command, catching stderr, stdout
+
+ Need to execute as_shell if the command uses wildcards
+
+ Always need to execute native commands as_shell
+ """
+ # Take the last ';'-separated segment because there may be "export" commands in front.
+ args = cmd_and_args.split(';')[-1].split()
+ msger.debug(args)
+
+ if pseudo:
+ cmd_and_args = pseudo + cmd_and_args
+ native_paths = \
+ "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
+ (native_sysroot, native_sysroot, native_sysroot)
+ native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
+ (native_paths, cmd_and_args)
+ msger.debug("exec_native_cmd: %s" % cmd_and_args)
+
+ # If the command isn't in the native sysroot say we failed.
+ if cmd_in_path(args[0], native_paths):
+ ret, out = _exec_cmd(native_cmd_and_args, True, catch)
+ else:
+ ret = 127
+
+ prog = args[0]
+ # shell command-not-found
+ if ret == 127 \
+ or (pseudo and ret == 1 and out == "Can't find '%s' in $PATH." % prog):
+ msg = "A native program %s required to build the image "\
+ "was not found (see details above).\n\n" % prog
+ recipe = NATIVE_RECIPES.get(prog)
+ if recipe:
+ msg += "Please bake it with 'bitbake %s-native' "\
+ "and try again.\n" % recipe
+ else:
+ msg += "Wic failed to find a recipe to build native %s. Please "\
+ "file a bug against wic.\n" % prog
+ msger.error(msg)
+ if out:
+ msger.debug('"%s" output: %s' % (args[0], out))
+
+ if ret != 0:
+ msger.error("exec_cmd: '%s' returned '%s' instead of 0" % \
+ (cmd_and_args, ret))
+
+ return ret, out
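+ # Illustrative call (paths and arguments are examples only):
+ #   exec_native_cmd("mkfs.vfat -n boot -S 512 -C /tmp/efi.img 8192",
+ #                   native_sysroot)
+ # If mkfs.vfat is not found, NATIVE_RECIPES maps it to dosfstools, so the
+ # error message suggests running 'bitbake dosfstools-native'.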
+
+BOOTDD_EXTRA_SPACE = 16384
+
+class BitbakeVars(defaultdict):
+ """
+ Container for Bitbake variables.
+ """
+ def __init__(self):
+ defaultdict.__init__(self, dict)
+
+ # default_image and vars_dir attributes should be set from outside
+ self.default_image = None
+ self.vars_dir = None
+
+ def _parse_line(self, line, image):
+ """
+ Parse one line from bitbake -e output or from .env file.
+ Put result key-value pair into the storage.
+ """
+ if "=" not in line:
+ return
+ try:
+ key, val = line.split("=")
+ except ValueError:
+ return
+ key = key.strip()
+ val = val.strip()
+ if key.replace('_', '').isalnum():
+ self[image][key] = val.strip('"')
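+ # For example, the line 'MACHINE="qemux86"' from 'bitbake -e <image>' ends
+ # up stored as self[image]['MACHINE'] = 'qemux86'.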
+
+ def get_var(self, var, image=None):
+ """
+ Get bitbake variable from 'bitbake -e' output or from .env file.
+ This is a lazy method, i.e. it runs bitbake or parses the file only
+ when a variable is requested. It also caches results.
+ """
+ if not image:
+ image = self.default_image
+
+ if image not in self:
+ if image and self.vars_dir:
+ fname = os.path.join(self.vars_dir, image + '.env')
+ if os.path.isfile(fname):
+ # parse .env file
+ with open(fname) as varsfile:
+ for line in varsfile:
+ self._parse_line(line, image)
+ else:
+ print "Couldn't get bitbake variable from %s." % fname
+ print "File %s doesn't exist." % fname
+ return
+ else:
+ # Get bitbake -e output
+ cmd = "bitbake -e"
+ if image:
+ cmd += " %s" % image
+
+ log_level = msger.get_loglevel()
+ msger.set_loglevel('normal')
+ ret, lines = _exec_cmd(cmd)
+ msger.set_loglevel(log_level)
+
+ if ret:
+ print "Couldn't get '%s' output." % cmd
+ print "Bitbake failed with error:\n%s\n" % lines
+ return
+
+ # Parse bitbake -e output
+ for line in lines.split('\n'):
+ self._parse_line(line, image)
+
+ # Make first image a default set of variables
+ images = [key for key in self if key]
+ if len(images) == 1:
+ self[None] = self[image]
+
+ return self[image].get(var)
+
+# Create BB_VARS singleton
+BB_VARS = BitbakeVars()
+
+def get_bitbake_var(var, image=None):
+ """
+ Provide old get_bitbake_var API by wrapping
+ get_var method of BB_VARS singleton.
+ """
+ return BB_VARS.get_var(var, image)
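+ # Typical use elsewhere in wic (illustration):
+ #   deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ # The first lookup runs 'bitbake -e' (or reads a .env file from vars_dir);
+ # later lookups are served from the BB_VARS cache.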
+
+def parse_sourceparams(sourceparams):
+ """
+ Split sourceparams string of the form key1=val1[,key2=val2,...]
+ into a dict. Also accepts valueless keys i.e. without =.
+
+ Returns dict of param key/val pairs (note that val may be None).
+ """
+ params_dict = {}
+
+ params = sourceparams.split(',')
+ if params:
+ for par in params:
+ if not par:
+ continue
+ if not '=' in par:
+ key = par
+ val = None
+ else:
+ key, val = par.split('=')
+ params_dict[key] = val
+
+ return params_dict
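+ # For example:
+ #   parse_sourceparams("loader=grub-efi,image_name=IsoImage,unattended")
+ # returns {'loader': 'grub-efi', 'image_name': 'IsoImage', 'unattended': None}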
diff --git a/scripts/lib/wic/utils/partitionedfs.py b/scripts/lib/wic/utils/partitionedfs.py
new file mode 100644
index 0000000..ad596d2
--- /dev/null
+++ b/scripts/lib/wic/utils/partitionedfs.py
@@ -0,0 +1,360 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2009, 2010, 2011 Intel, Inc.
+# Copyright (c) 2007, 2008 Red Hat, Inc.
+# Copyright (c) 2008 Daniel P. Berrange
+# Copyright (c) 2008 David P. Huff
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+from wic import msger
+from wic.utils.errors import ImageError
+from wic.utils.oe.misc import exec_cmd, exec_native_cmd
+
+# Overhead of the MBR partitioning scheme (just one sector)
+MBR_OVERHEAD = 1
+
+# Overhead of the GPT partitioning scheme
+GPT_OVERHEAD = 34
+
+# Size of a sector in bytes
+SECTOR_SIZE = 512
+
+class Image(object):
+ """
+ Generic base object for an image.
+
+ An Image is a container for a set of DiskImages and associated
+ partitions.
+ """
+ def __init__(self, native_sysroot=None):
+ self.disks = {}
+ self.partitions = []
+ # Size of a sector used in calculations
+ self.sector_size = SECTOR_SIZE
+ self._partitions_layed_out = False
+ self.native_sysroot = native_sysroot
+
+ def __add_disk(self, disk_name):
+ """ Add a disk 'disk_name' to the internal list of disks. Note,
+ 'disk_name' is the name of the disk in the target system
+ (e.g., sdb). """
+
+ if disk_name in self.disks:
+ # We already have this disk
+ return
+
+ assert not self._partitions_layed_out
+
+ self.disks[disk_name] = \
+ {'disk': None, # Disk object
+ 'numpart': 0, # Number of allocated partitions
+ 'realpart': 0, # Number of partitions in the partition table
+ 'partitions': [], # Indexes to self.partitions
+ 'offset': 0, # Offset of next partition (in sectors)
+ # Minimum required disk size to fit all partitions (in bytes)
+ 'min_size': 0,
+ 'ptable_format': "msdos"} # Partition table format
+
+ def add_disk(self, disk_name, disk_obj):
+ """ Add a disk object which have to be partitioned. More than one disk
+ can be added. In case of multiple disks, disk partitions have to be
+ added for each disk separately with 'add_partition()". """
+
+ self.__add_disk(disk_name)
+ self.disks[disk_name]['disk'] = disk_obj
+
+ def __add_partition(self, part):
+ """ This is a helper function for 'add_partition()' which adds a
+ partition to the internal list of partitions. """
+
+ assert not self._partitions_layed_out
+
+ self.partitions.append(part)
+ self.__add_disk(part['disk_name'])
+
+ def add_partition(self, size, disk_name, mountpoint, source_file=None, fstype=None,
+ label=None, fsopts=None, boot=False, align=None, no_table=False,
+ part_type=None, uuid=None):
+ """ Add the next partition. Prtitions have to be added in the
+ first-to-last order. """
+
+ ks_pnum = len(self.partitions)
+
+ # Converting kB to sectors for parted
+ size = size * 1024 / self.sector_size
+
+ part = {'ks_pnum': ks_pnum, # Partition number in the KS file
+ 'size': size, # In sectors
+ 'mountpoint': mountpoint, # Mount relative to chroot
+ 'source_file': source_file, # partition contents
+ 'fstype': fstype, # Filesystem type
+ 'fsopts': fsopts, # Filesystem mount options
+ 'label': label, # Partition label
+ 'disk_name': disk_name, # physical disk name holding partition
+ 'device': None, # kpartx device node for partition
+ 'num': None, # Partition number
+ 'boot': boot, # Bootable flag
+ 'align': align, # Partition alignment
+ 'no_table' : no_table, # Partition does not appear in partition table
+ 'part_type' : part_type, # Partition type
+ 'uuid': uuid} # Partition UUID
+
+ self.__add_partition(part)
+
+ def layout_partitions(self, ptable_format="msdos"):
+ """ Layout the partitions, meaning calculate the position of every
+ partition on the disk. The 'ptable_format' parameter defines the
+ partition table format and may be "msdos". """
+
+ msger.debug("Assigning %s partitions to disks" % ptable_format)
+
+ if self._partitions_layed_out:
+ return
+
+ self._partitions_layed_out = True
+
+ # Go through partitions in the order they are added in .ks file
+ for num in range(len(self.partitions)):
+ part = self.partitions[num]
+
+ if not self.disks.has_key(part['disk_name']):
+ raise ImageError("No disk %s for partition %s" \
+ % (part['disk_name'], part['mountpoint']))
+
+ if ptable_format == 'msdos' and part['part_type']:
+ # The --part-type can also be implemented for MBR partitions,
+ # in which case it would map to the 1-byte "partition type"
+ # field at offset 3 of the partition entry.
+ raise ImageError("setting custom partition type is not " \
+ "implemented for msdos partitions")
+
+ # Get the disk where the partition is located
+ disk = self.disks[part['disk_name']]
+ disk['numpart'] += 1
+ if not part['no_table']:
+ disk['realpart'] += 1
+ disk['ptable_format'] = ptable_format
+
+ if disk['numpart'] == 1:
+ if ptable_format == "msdos":
+ overhead = MBR_OVERHEAD
+ elif ptable_format == "gpt":
+ overhead = GPT_OVERHEAD
+
+ # Skip one sector required for the partitioning scheme overhead
+ disk['offset'] += overhead
+
+ if disk['realpart'] > 3:
+ # Reserve a sector for EBR for every logical partition
+ # before alignment is performed.
+ if ptable_format == "msdos":
+ disk['offset'] += 1
+
+
+ if part['align']:
+ # If not first partition and we do have alignment set we need
+ # to align the partition.
+ # FIXME: This leaves a empty spaces to the disk. To fill the
+ # gaps we could enlargea the previous partition?
+
+ # Calc how much the alignment is off.
+ align_sectors = disk['offset'] % (part['align'] * 1024 / self.sector_size)
+
+ if align_sectors:
+ # If partition is not aligned as required, we need
+ # to move forward to the next alignment point
+ align_sectors = (part['align'] * 1024 / self.sector_size) - align_sectors
+
+ msger.debug("Realignment for %s%s with %s sectors, original"
+ " offset %s, target alignment is %sK." %
+ (part['disk_name'], disk['numpart'], align_sectors,
+ disk['offset'], part['align']))
+
+ # Increase the offset so we actually start the partition at the right alignment
+ disk['offset'] += align_sectors
+
+ part['start'] = disk['offset']
+ disk['offset'] += part['size']
+
+ part['type'] = 'primary'
+ if not part['no_table']:
+ part['num'] = disk['realpart']
+ else:
+ part['num'] = 0
+
+ if disk['ptable_format'] == "msdos":
+ if disk['realpart'] > 3:
+ part['type'] = 'logical'
+ part['num'] = disk['realpart'] + 1
+
+ disk['partitions'].append(num)
+ msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
+ "sectors (%d bytes)." \
+ % (part['mountpoint'], part['disk_name'], part['num'],
+ part['start'], part['start'] + part['size'] - 1,
+ part['size'], part['size'] * self.sector_size))
+
+ # Once all the partitions have been laid out, we can calculate the
+ # minimum disk sizes.
+ for disk in self.disks.values():
+ disk['min_size'] = disk['offset']
+ if disk['ptable_format'] == "gpt":
+ disk['min_size'] += GPT_OVERHEAD
+
+ disk['min_size'] *= self.sector_size
+
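+ # Worked example of the alignment math above (msdos table): the first
+ # partition starts at sector 1 (MBR_OVERHEAD). With --align 1024 the
+ # alignment unit is 1024 * 1024 / 512 = 2048 sectors, so the offset is
+ # moved forward by 2047 sectors and the partition starts at sector 2048.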
+ def __create_partition(self, device, parttype, fstype, start, size):
+ """ Create a partition on an image described by the 'device' object. """
+
+ # Start is included in the size, so we need to subtract one from the end.
+ end = start + size - 1
+ msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
+ (parttype, start, end, size))
+
+ cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
+ if fstype:
+ cmd += " %s" % fstype
+ cmd += " %d %d" % (start, end)
+
+ return exec_native_cmd(cmd, self.native_sysroot)
+
+ def __format_disks(self):
+ self.layout_partitions()
+
+ for dev in self.disks.keys():
+ disk = self.disks[dev]
+ msger.debug("Initializing partition table for %s" % \
+ (disk['disk'].device))
+ exec_native_cmd("parted -s %s mklabel %s" % \
+ (disk['disk'].device, disk['ptable_format']),
+ self.native_sysroot)
+
+ msger.debug("Creating partitions")
+
+ for part in self.partitions:
+ if part['num'] == 0:
+ continue
+
+ disk = self.disks[part['disk_name']]
+ if disk['ptable_format'] == "msdos" and part['num'] == 5:
+ # Create an extended partition (note: extended
+ # partition is described in MBR and contains all
+ # logical partitions). The logical partitions save a
+ # sector for an EBR just before the start of a
+ # partition. The extended partition must start one
+ # sector before the start of the first logical
+ # partition. This way the first EBR is inside of the
+ # extended partition. Since the extended partition
+ # starts a sector before the first logical partition,
+ # add a sector at the back, so that there is enough
+ # room for all logical partitions.
+ self.__create_partition(disk['disk'].device, "extended",
+ None, part['start'] - 1,
+ disk['offset'] - part['start'] + 1)
+
+ if part['fstype'] == "swap":
+ parted_fs_type = "linux-swap"
+ elif part['fstype'] == "vfat":
+ parted_fs_type = "fat32"
+ elif part['fstype'] == "msdos":
+ parted_fs_type = "fat16"
+ elif part['fstype'] == "ontrackdm6aux3":
+ parted_fs_type = "ontrackdm6aux3"
+ else:
+ # Type for ext2/ext3/ext4/btrfs
+ parted_fs_type = "ext2"
+
+ # The boot ROM of OMAP boards requires the vfat boot partition to have
+ # an even number of sectors.
+ if part['mountpoint'] == "/boot" and part['fstype'] in ["vfat", "msdos"] \
+ and part['size'] % 2:
+ msger.debug("Substracting one sector from '%s' partition to " \
+ "get even number of sectors for the partition" % \
+ part['mountpoint'])
+ part['size'] -= 1
+
+ self.__create_partition(disk['disk'].device, part['type'],
+ parted_fs_type, part['start'], part['size'])
+
+ if part['part_type']:
+ msger.debug("partition %d: set type UID to %s" % \
+ (part['num'], part['part_type']))
+ exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
+ (part['num'], part['part_type'],
+ disk['disk'].device), self.native_sysroot)
+
+ if part['uuid']:
+ msger.debug("partition %d: set UUID to %s" % \
+ (part['num'], part['uuid']))
+ exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
+ (part['num'], part['uuid'], disk['disk'].device),
+ self.native_sysroot)
+
+ if part['boot']:
+ flag_name = "legacy_boot" if disk['ptable_format'] == 'gpt' else "boot"
+ msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
+ (flag_name, part['num'], disk['disk'].device))
+ exec_native_cmd("parted -s %s set %d %s on" % \
+ (disk['disk'].device, part['num'], flag_name),
+ self.native_sysroot)
+
+ # Parted defaults to enabling the lba flag for fat16 partitions,
+ # which causes compatibility issues with some firmware (and really
+ # isn't necessary).
+ if parted_fs_type == "fat16":
+ if disk['ptable_format'] == 'msdos':
+ msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \
+ (part['num'], disk['disk'].device))
+ exec_native_cmd("parted -s %s set %d lba off" % \
+ (disk['disk'].device, part['num']),
+ self.native_sysroot)
+
+ def cleanup(self):
+ if self.disks:
+ for dev in self.disks:
+ disk = self.disks[dev]
+ try:
+ disk['disk'].cleanup()
+ except:
+ pass
+
+ def assemble(self, image_file):
+ msger.debug("Installing partitions")
+
+ for part in self.partitions:
+ source = part['source_file']
+ if source:
+ # install source_file contents into a partition
+ cmd = "dd if=%s of=%s bs=%d seek=%d count=%d conv=notrunc" % \
+ (source, image_file, self.sector_size,
+ part['start'], part['size'])
+ exec_cmd(cmd)
+
+ msger.debug("Installed %s in partition %d, sectors %d-%d, "
+ "size %d sectors" % \
+ (source, part['num'], part['start'],
+ part['start'] + part['size'] - 1, part['size']))
+
+ os.rename(source, image_file + '.p%d' % part['num'])
+
+ def create(self):
+ for dev in self.disks.keys():
+ disk = self.disks[dev]
+ disk['disk'].create()
+
+ self.__format_disks()
+
+ return
diff --git a/scripts/lib/wic/utils/runner.py b/scripts/lib/wic/utils/runner.py
new file mode 100644
index 0000000..7431917
--- /dev/null
+++ b/scripts/lib/wic/utils/runner.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import subprocess
+
+from wic import msger
+
+def runtool(cmdln_or_args, catch=1):
+ """ wrapper for most of the subprocess calls
+ input:
+ cmdln_or_args: can be both args and cmdln str (shell=True)
+ catch: 0, quietly run
+ 1, only STDOUT
+ 2, only STDERR
+ 3, both STDOUT and STDERR
+ return:
+ (rc, output)
+ if catch==0: the output will always be None
+ """
+
+ if catch not in (0, 1, 2, 3):
+ # invalid catch selection, will cause exception, that's good
+ return None
+
+ if isinstance(cmdln_or_args, list):
+ cmd = cmdln_or_args[0]
+ shell = False
+ else:
+ import shlex
+ cmd = shlex.split(cmdln_or_args)[0]
+ shell = True
+
+ if catch != 3:
+ dev_null = os.open("/dev/null", os.O_WRONLY)
+
+ if catch == 0:
+ sout = dev_null
+ serr = dev_null
+ elif catch == 1:
+ sout = subprocess.PIPE
+ serr = dev_null
+ elif catch == 2:
+ sout = dev_null
+ serr = subprocess.PIPE
+ elif catch == 3:
+ sout = subprocess.PIPE
+ serr = subprocess.STDOUT
+
+ try:
+ process = subprocess.Popen(cmdln_or_args, stdout=sout,
+ stderr=serr, shell=shell)
+ (sout, serr) = process.communicate()
+ # combine stdout and stderr, filter None out
+ out = ''.join(filter(None, [sout, serr]))
+ except OSError, err:
+ if err.errno == 2:
+ # [Errno 2] No such file or directory
+ msger.error('Cannot run command: %s, lost dependency?' % cmd)
+ else:
+ raise # relay
+ finally:
+ if catch != 3:
+ os.close(dev_null)
+
+ return (process.returncode, out)
+
+def show(cmdln_or_args):
+    # run the command and show all of its output via msger.verbose
+
+    rcode, out = runtool(cmdln_or_args, catch=3)
+
+    if isinstance(cmdln_or_args, list):
+        cmd = ' '.join(cmdln_or_args)
+    else:
+        cmd = cmdln_or_args
+
+    msg = 'running command: "%s"' % cmd
+    if out:
+        out = out.strip()
+    if out:
+        msg += ', with output:'
+        msg += '\n +----------------'
+        for line in out.splitlines():
+            msg += '\n | %s' % line
+        msg += '\n +----------------'
+
+    msger.verbose(msg)
+    return rcode
+
+def outs(cmdln_or_args, catch=1):
+    # get the outputs of tools
+    return runtool(cmdln_or_args, catch)[1].strip()
+
+def quiet(cmdln_or_args):
+    return runtool(cmdln_or_args, catch=0)[0]
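[Editor's note, not part of the patch: a hedged usage sketch of the helpers added above. The import path follows the new wic.utils.runner module; the commands shown are illustrative assumptions and require the wic package (and its msger module) to be importable.]

from wic.utils import runner

rc, out = runner.runtool(["parted", "--version"], catch=3)  # rc plus combined stdout/stderr
version = runner.outs("parted --version")                   # stripped stdout, shell=True form
rc = runner.show(["ls", "-l", "/tmp"])                      # log command and output via msger.verbose
rc = runner.quiet(["sync"])                                  # discard all output, keep exit code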
diff --git a/scripts/lib/wic/utils/syslinux.py b/scripts/lib/wic/utils/syslinux.py
new file mode 100644
index 0000000..aace286
--- /dev/null
+++ b/scripts/lib/wic/utils/syslinux.py
@@ -0,0 +1,58 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# AUTHOR
+# Adrian Freihofer <adrian.freihofer (at] neratec.com>
+
+
+import re
+from wic import msger
+
+
+def serial_console_form_kargs(kernel_args):
+    """
+    Create a SERIAL ... line from the kernel parameters.
+
+    syslinux needs a line "SERIAL port [baudrate [flowcontrol]]"
+    in the syslinux.cfg file. The config line is generated based
+    on the kernel boot parameters. The parameters of the first
+    ttyS console are considered for the syslinux config.
+    @param kernel_args kernel command line
+    @return line for syslinux config file e.g. "SERIAL 0 115200"
+    """
+    syslinux_conf = ""
+    for param in kernel_args.split():
+        param_match = re.match("console=ttyS([0-9]+),?([0-9]*)([noe]?)([0-9]?)(r?)", param)
+        if param_match:
+            syslinux_conf += "SERIAL " + param_match.group(1)
+            # baudrate
+            if param_match.group(2):
+                syslinux_conf += " " + param_match.group(2)
+            # parity
+            if param_match.group(3) and param_match.group(3) != 'n':
+                msger.warning("syslinux does not support parity for console. {} is ignored."
+                              .format(param_match.group(3)))
+            # number of bits
+            if param_match.group(4) and param_match.group(4) != '8':
+                msger.warning("syslinux supports 8 bit console configuration only. {} is ignored."
+                              .format(param_match.group(4)))
+            # flow control
+            if param_match.group(5) and param_match.group(5) != '':
+                msger.warning("syslinux does not support console flow control configuration. {} is ignored."
+                              .format(param_match.group(5)))
+            break
+
+    return syslinux_conf
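[Editor's note, not part of the patch: a small illustration of the conversion performed by serial_console_form_kargs() above. It assumes the wic package is importable and uses a made-up kernel command line.]

from wic.utils.syslinux import serial_console_form_kargs

# "console=ttyS0,115200n8" -> port 0, baudrate 115200; parity 'n' and 8 data
# bits match the defaults the function accepts, so no warning is emitted.
print(serial_console_form_kargs("root=/dev/sda2 console=ttyS0,115200n8"))
# SERIAL 0 115200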