# NOTE(review): the following four lines are web-page residue from copy/pasting
# the workflow-run page ("check-run-cron-qemu #28"); commented out so the file
# parses as YAML. They are not part of the workflow definition.
#Skip to content
#check-run-cron-qemu #28
#check-run-cron-qemu
#check-run-cron-qemu #28
# WARNING: DANGER: What is otherwise considered bad practice (eg. direct use of '/dev/loop*') may be allowed here. Intended only for isolated (ie. virtualized) ephemeral hosts.
# DANGER: GitHub Actions ONLY!
# DANGER: Do NOT use for any kind of release, push, etc.
# (ie. may bypass hash)
#cd ..
#[[ -e ./ubDistBuild ]] || git clone --depth 1 --recursive https://github.com/mirage335-colossus/ubiquitous_bash.git
#cd ../ubDistBuild
# _get_vmImg-live --> rm/_install_ubDistBuild
#--> _openChRoot/_closeChRoot --> echo ' sleep /waitFile ... _revert_live | tee /mnt/cache/log ' | crontab
#--> _convert-live
#--> mkfs.btrfs ./_local/cache.img
#--> qemu
#--> mount/umount -t btrfs ./_local/cache.img ./_local/cache_fs
#--> PASS/FAIL (from logs)
name: check-run-cron-qemu

# Least-privilege token: read-only repository contents, everything else disabled.
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  actions: none
  checks: none
  contents: read
  deployments: none
  issues: none
  packages: none
  pull-requests: none
  repository-projects: none
  security-events: none
  statuses: none
on:
#push:
workflow_dispatch:
inputs:
releaseLabel:
required: false
#default: base
default: latest
type: choice
options:
- base
- latest
- internal
devfast:
type: boolean
default: true
skimfast:
type: boolean
default: true
qemuNoKVM:
type: boolean
default: false
runnerName:
required: false
default: ubuntu-latest-m
type: choice
options:
- ubuntu-latest
- ubuntu-latest-m
#debug_enabled:
#type: boolean
#default: false
# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#schedule
schedule:
#- cron: '5 7 * * 6'
#- cron: '5 1 * * 6'
#- cron: '5 1 * * 2,4'
#- cron: '5 1 * * 2'
#- cron: '5 1 * * 4'
#- cron: '5 1 * * 5'
#- cron: '5 7 15 * *'
- cron: '25 6 1 * *'
# https://stackoverflow.com/questions/62750603/github-actions-trigger-another-action-after-one-action-is-completed
#workflow_run:
#workflows: ["build"]
#types:
#- completed
# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
  # End-to-end check: fetch a ubDistBuild VM image, inject a crontab that runs
  # inside the guest and writes logs to a btrfs cache disk, boot the image
  # under QEMU (KVM when available), then mount the cache disk on the host and
  # grep the logs for pass/fail markers.
  check_run_cronQemu:
    # Cron/scheduled runs have no inputs, so runnerName is empty: fall back to 'ubuntu-latest'.
    runs-on: ${{ github.event.inputs.runnerName == '' && 'ubuntu-latest' || github.event.inputs.runnerName }}
    steps:
      # Best-effort alias of the 'runner' user/group onto the current UID (buildJet compatibility).
      - name: users
        shell: bash
        run: |
          sudo -u ubuntu -n bash -c 'sudo -n useradd runner --non-unique -u $UID -g $UID' || true
          sudo -u ubuntu -n bash -c 'sudo -n groupadd runner --non-unique -g $UID' || true
          sudo -u runner -n bash -c 'sudo -n echo $USER $UID' || true
          true
      # Apparently may increase buildJet 'runner' to 77GB (instead of 61GB).
      # Apparently may increase Github Actions 'runner' to 59GB (instead of 31GB) .
      - name: Maximize build space
        if: ${{ github.event.inputs.runnerName != 'ubuntu-latest-m' }}
        uses: easimon/maximize-build-space@master
        with:
          root-reserve-mb: 1625
          temp-reserve-mb: 50
          swap-size-mb: 2
          #remove-dotnet: ${{ github.event.inputs.runnerName != 'ubuntu-latest-m' }}
          remove-dotnet: 'true'
          #remove-android: ${{ github.event.inputs.runnerName != 'ubuntu-latest-m' }}
          remove-android: 'true'
          #remove-haskell: ${{ github.event.inputs.runnerName != 'ubuntu-latest-m' }}
          remove-haskell: 'true'
          #remove-codeql: ${{ github.event.inputs.runnerName != 'ubuntu-latest-m' }}
          remove-codeql: 'true'
          #remove-docker-images: ${{ github.event.inputs.runnerName != 'ubuntu-latest-m' }}
          remove-docker-images: 'true'
      # https://github.com/orgs/community/discussions/8305
      # https://github.blog/changelog/2023-02-23-hardware-accelerated-android-virtualization-on-actions-windows-and-linux-larger-hosted-runners/
      # https://github.com/actions/runner-images/discussions/7191
      - name: Enable KVM group perms
        if: ${{ github.event.inputs.runnerName == 'ubuntu-latest-m' }}
        shell: bash
        run: |
          #echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo -n tee /etc/udev/rules.d/99-kvm4all.rules
          echo 'KERNEL=="kvm", GROUP="docker", MODE="0664", OPTIONS+="static_node=kvm"' | sudo -n tee /etc/udev/rules.d/99-kvm4all.rules
          sudo -n udevadm control --reload-rules
          sudo -n udevadm trigger --name-match=kvm
          sudo -n apt-get update
          # NOTE(review): the bare 'qemu' metapackage is absent on newer Ubuntu images — confirm against the runner image; qemu-system-x86 is the package actually used.
          sudo -n apt-get install -y libvirt-clients libvirt-daemon-system libvirt-daemon virtinst bridge-utils qemu qemu-system-x86
          sudo -n usermod -a -G kvm $USER
          sudo -n usermod -a -G libvirt $USER
          sudo -n usermod -a -G docker $USER
          sudo -n adduser $USER kvm
          #sudo -n chown -R $USER:kvm /dev/kvm
          sudo -n chown -R $USER:docker /dev/kvm
          ls -l /dev/kvm
          ls -l /dev/kvm*
          echo $USER
          groups
          sudo -n lsmod | grep kvm
          # Reload kvm modules so the new udev rule/ownership applies to a fresh device node.
          sudo -n modprobe -r kvm_intel
          sudo -n modprobe -r kvm_amd
          sudo -n modprobe -r kvm
          ( grep --color vmx /proc/cpuinfo && sudo -n modprobe kvm_intel ) || ( grep --color svm /proc/cpuinfo && sudo -n modprobe kvm_amd )
          sudo -n modprobe kvm
          sudo -n lsmod | grep kvm
          #sudo -n chown -R $USER:kvm /dev/kvm
          sudo -n chown -R $USER:docker /dev/kvm
          ls -l /dev/kvm
          ls -l /dev/kvm*
      - name: Check KVM group perms
        if: ${{ github.event.inputs.runnerName == 'ubuntu-latest-m' }}
        shell: bash
        run: |
          grep --color svm /proc/cpuinfo || true
          grep --color vmx /proc/cpuinfo || true
          sudo -n lsmod | grep kvm
          ls -l /dev/kvm
          ls -l /dev/kvm*
          echo $USER
          groups
      - name: Force KVM group perms
        if: ${{ github.event.inputs.runnerName == 'ubuntu-latest-m' }}
        shell: bash
        run: |
          sudo -n ls -l /dev/kvm
          sudo -n ls -l /dev/kvm*
          sudo -n chown -R $USER:docker /dev/kvm
          sudo -n chmod 664 /dev/kvm
          echo
          #grep --color svm /proc/cpuinfo || true
          #grep --color vmx /proc/cpuinfo || true
          sudo -n lsmod | grep kvm
          ls -l /dev/kvm
          ls -l /dev/kvm*
          echo $USER
          groups
          echo
      # Install ubiquitous_bash to the conventional home-directory location.
      - name: _getCore_ub
        shell: bash
        run: |
          mkdir -p ~/core/infrastructure
          cd ~/core/infrastructure
          git clone --depth 1 --recursive https://github.com/mirage335-colossus/ubiquitous_bash.git
          cd ubiquitous_bash
          ./_setupUbiquitous.bat
      - name: _getMinimal_cloud
        shell: bash
        run: |
          ! ~/core/infrastructure/ubiquitous_bash/ubiquitous_bash.sh _getMinimal_cloud && exit 1
          true
          #! sudo -n apt-get -y clean && exit 1
          df -h
          df -h /
      # FIX(review): checkout@v2 runs on an end-of-life node runtime; @v4 keeps the same interface.
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      #- name: _getMinimal_cloud
      #shell: bash
      #run: |
      #! ./ubiquitous_bash.sh _getMinimal_cloud && exit 1
      #true
      ##! sudo -n apt-get -y clean && exit 1
      #df -h
      #df -h /
      # For VBoxManage for _convert . Otherwise historically not apparently necessary.
      #- name: _getMost_ubuntu22-VBoxManage
      #shell: bash
      #run: |
      ##! ./ubiquitous_bash.sh _getMost && exit 1
      ##true
      ##! sudo -n apt-get -y clean && exit 1
      ##! ./ubiquitous_bash.sh _getMost_debian11_aptSources && exit 1
      ##sudo -n apt-get update
      ##! sudo -n apt-get -d install -y virtualbox-7.0 && exit 1
      #! sudo -n ./ubiquitous_bash.sh _getMost_ubuntu22-VBoxManage && exit 1
      #df -h
      #df -h /
      # Headless X server (for QEMU's qxl-vga display) plus xwd for screenshots.
      - name: _getMost-xvfb
        shell: bash
        run: |
          #! ./ubiquitous_bash.sh _getMost && exit 1
          #true
          #! sudo -n apt-get -y clean && exit 1
          #! ./ubiquitous_bash.sh _getMost_debian11_aptSources && exit 1
          #sudo -n apt-get update
          #! sudo -n apt-get -d install -y virtualbox-7.0 && exit 1
          #! sudo -n ./ubiquitous_bash.sh _getMost_ubuntu22-VBoxManage && exit 1
          sudo -n env DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install --install-recommends -y xvfb
          sudo -n env DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install --install-recommends -y x11-apps
          df -h
          df -h /
      # Verify the legacy 'whirlpool' digest is usable (needed for image hash checks).
      - name: _test_hash_legacy
        shell: bash
        run: |
          if [[ -e "/etc/ssl/openssl_legacy.cnf" ]]
          then
            echo -n | env OPENSSL_CONF="/etc/ssl/openssl_legacy.cnf" openssl dgst -whirlpool -binary | xxd -p -c 256
            # FIX(review): PIPESTATUS[0] is the status of 'echo' (always 0); index 1 is 'openssl', the command under test.
            exit ${PIPESTATUS[1]}
          else
            echo -n | openssl dgst -whirlpool -binary | xxd -p -c 256
            exit ${PIPESTATUS[1]}
          fi
      #- name: txt-stat/tboot
      #shell: bash
      #run: |
      #cat /proc/cpuinfo | grep 'model name'
      #echo
      #sudo -n txt-stat || true
      #true
      # Informational: report AMD SEV support (never fails the job).
      - name: sev/AMD
        shell: bash
        run: |
          cat /proc/cpuinfo | grep 'model name'
          echo
          echo 'cat /sys/module/kvm_amd/parameters/sev'
          cat /sys/module/kvm_amd/parameters/sev || true
          echo 'dmesg | grep -i sev'
          sudo -n dmesg | grep -i sev || true
          true
      # Informational: report Intel SGX support (never fails the job).
      - name: sgx/Intel
        shell: bash
        run: |
          cat /proc/cpuinfo | grep 'model name'
          echo
          echo 'grep sgx /proc/cpuinfo'
          grep sgx /proc/cpuinfo || true
          echo 'dmesg | grep sgx'
          sudo -n dmesg | grep -i sgx || true
          # Apparently normal: ' sgx: [Firmware Bug]: Unable to map EPC section to online node. Fallback to the NUMA node 0. '
          true
      # ### ##### _get_vmImg --> rm/_install_ubDistBuild
      - name: _get_vmImg_ubDistBuild
        shell: bash
        run: |
          current_releaseLabel="${{ inputs.releaseLabel }}"
          #[[ "$current_releaseLabel" == "" ]] && current_releaseLabel="base"
          [[ "$current_releaseLabel" == "" ]] && current_releaseLabel="latest"
          #export FORCE_AXEL=8
          #./ubiquitous_bash.sh _get_vmImg_ubDistBuild "$current_releaseLabel"
          # DANGER: Github Actions ONLY!
          # DANGER: Do NOT use for any kind of release, push, etc .
          export FORCE_AXEL=8
          export MANDATORY_HASH="true"
          cd ./_local
          [[ "$current_releaseLabel" == "latest" ]] && current_releaseLabel=""
          ../ubiquitous_bash.sh _wget_githubRelease_join-stdout "soaringDistributions/ubDistBuild" "$current_releaseLabel" "package_image.tar.flx" 2> /dev/null | ../ubiquitous_bash.sh _get_extract_ubDistBuild
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: _create_ubDistBuild-install-ubDistBuild
        shell: bash
        run: |
          ./_create_ubDistBuild-install-ubDistBuild
      # ### ##### --> _openChRoot/_closeChRoot --> echo ' sleep /waitFile ... _run | tee /mnt/cache/log ' | crontab
      # Inject an @reboot crontab into the guest image: mounts the cache disk (label Y2FjaGUK
      # == base64 of 'cache'), logs 'ls /', then powers the guest off.
      - name: _run-crontab
        shell: bash
        run: |
          ./ubiquitous_bash.sh _openChRoot
          #echo '/root/ubiquitous_bash.sh _wait_rootLock_OTHER'
          #ls / /run/live/medium
          #du -sh /vm.img /package_rootfs.tar /run/live/medium/extIface.exe /run/live/medium/ubDistBuild.exe
          #echo cache | base64 == Y2FjaGUK
          ( ./ubiquitous_bash.sh _chroot crontab -l ; echo '@reboot /bin/true uk4uPhB6 ; /usr/bin/mkdir -p /media/tmp ; /usr/bin/mount -o compress=zstd:2 -L Y2FjaGUK /media/tmp ; cp -a /home/user/ubDistBuild /media/tmp/ ; /bin/bash -c "export skimfast=${{ inputs.skimfast }} ; export devfast=${{ inputs.devfast }} ; cd /home/user ; /bin/sudo -n --preserve-env=skimfast,devfast -u user ls / >> /media/tmp/_ls-fromImg.log 2>&1 ; /bin/true > /media/tmp/_run-crontab.log ; /bin/sleep 150 ; wait ; /bin/true > /var/log/_run-crontab.log ; sudo -n /usr/sbin/poweroff ; sudo -n /sbin/poweroff ; sudo -n poweroff"' ) | ./ubiquitous_bash.sh _chroot crontab '-'
          ./ubiquitous_bash.sh _chroot crontab -l
          ./ubiquitous_bash.sh _closeChRoot
      # ### ##### --> mkfs.btrfs ./_local/cache.img
      # ~1GB btrfs scratch disk the guest mounts by label to hand logs back to the host.
      - name: mkfs.btrfs ./_local/cache.img
        shell: bash
        run: |
          fallocate -l 1000000000 ./_local/cache.img
          mkfs.btrfs -L Y2FjaGUK --checksum xxhash -M -d single ./_local/cache.img
      # ### ##### --> qemu-fromImg
      #_qemu_system_x86_64 -nographic -usb -smp 4 -device usb-tablet -device qxl-vga -machine accel=kvm -bios /usr/share/OVMF/OVMF_CODE.fd -drive format=raw,file=/home/runner/work/ubDistBuild/ubDistBuild/_local/vm.img -drive file=/home/runner/work/ubDistBuild/ubDistBuild/v_uid/htg/htg.iso,media=cdrom -boot d -m 1664 -net nic,model=rtl8139 -net user,restrict=n
      # Re-assert /dev/kvm ownership (modprobe/udev earlier may have recreated the node).
      - name: Force KVM group perms
        if: ${{ github.event.inputs.runnerName == 'ubuntu-latest-m' }}
        shell: bash
        run: |
          sudo -n ls -l /dev/kvm
          sudo -n ls -l /dev/kvm*
          sudo -n chown -R $USER:docker /dev/kvm
          sudo -n chmod 664 /dev/kvm
          echo
          #grep --color svm /proc/cpuinfo || true
          #grep --color vmx /proc/cpuinfo || true
          sudo -n lsmod | grep kvm
          ls -l /dev/kvm
          ls -l /dev/kvm*
          echo $USER
          groups
          echo
      # Boot the image under QEMU on a virtual X display; poll until the guest powers
      # off or the iteration budget expires, taking periodic screenshots.
      - name: qemu-fromImg
        shell: bash
        continue-on-error: true
        run: |
          #sudo -n losetup /dev/loop7 ./_local/cache.img || true
          export skimfast=${{ inputs.skimfast }}
          echo skimfast $skimfast
          export qemuNoKVM=${{ inputs.qemuNoKVM }}
          echo qemuNoKVM $qemuNoKVM
          echo
          bash -c '
            qemuArgs+=(-usb -smp $(nproc) -device usb-tablet -device qxl-vga)
            #[[ "$qemuHeadless" == "true" ]] && qemuArgs+=(-nographic)
            if ./ubiquitous_bash.sh _testQEMU_hostArch_x64_hardwarevt
            then
              # Apparently, qemu kvm, can be unreliable if nested (eg. within VMWare Workstation VM).
              # NOTE(review): any value other than exactly "false" (including empty, eg. cron runs) skips KVM — confirm intended.
              if [[ "$qemuNoKVM" == "true" ]] || [[ "$qemuNoKVM" != "false" ]]
              then
                # NOTE(review): _messagePlain_good is a ubiquitous_bash function; verify it is in scope inside this plain bash -c shell.
                _messagePlain_good "ignored: kvm"
              else
                qemuArgs+=(-machine accel=kvm)
              fi
            else
              echo "missing: kvm"
            fi
            [[ -e /usr/share/OVMF/OVMF_CODE.fd ]] && qemuArgs+=(-bios /usr/share/OVMF/OVMF_CODE.fd)
            qemuUserArgs+=(-drive format=raw,file=./_local/vm.img)
            #qemuUserArgs+=(-drive file=./v_uid/htg/htg.iso,media=cdrom)
            #qemuUserArgs+=(-drive file=./_local/vm-live.iso,media=cdrom)
            qemuUserArgs+=(-drive format=raw,file=./_local/cache.img)
            qemuUserArgs+=(-boot c)
            #qemuUserArgs+=(-boot d)
            #qemuUserArgs+=(-m "6144")
            qemuUserArgs+=(-m "1664")
            qemuUserArgs+=(-net nic,model=rtl8139 -net user,restrict=n)
            qemuArgs+=("${qemuSpecialArgs[@]}" "${qemuUserArgs[@]}")
            Xvfb :30 > /dev/null 2>&1 &
            sleep 1
            export DISPLAY=:30
            echo qemu-system-x86_64 "${qemuArgs[@]}"
            qemu-system-x86_64 "${qemuArgs[@]}" &
            currentPID=$!
            currentExitStatus=0
            mkdir -p ./_local/analysis/screenshots
            currentIterationWait=0
            currentIterationMax=380
            # Without hardware virtualization the guest is far slower: allow a much larger budget.
            ! ./ubiquitous_bash.sh _testQEMU_hostArch_x64_hardwarevt && currentIterationMax=22800 || true
            while [[ "$currentIterationWait" -lt "$currentIterationMax" ]] && ( pgrep qemu-system > /dev/null 2>&1 || pgrep qemu > /dev/null 2>&1 || ps -p "$currentPID" > /dev/null 2>&1 )
            do
              sleep 1
              echo
              echo
              echo
              echo currentIterationWait="$currentIterationWait"
              echo
              mkdir -p ./_local/cache_fs
              #sudo -n mount -t btrfs -o ro ./_local/cache.img ./_local/cache_fs
              #sudo -n mount -t btrfs -o ro /dev/loop7 ./_local/cache_fs
              echo "ls ./_local/cache_fs"
              sudo -n ls ./_local/cache_fs
              echo
              echo "cat ./_local/cache_fs/_ls-fromImg.log"
              sudo -n cat ./_local/cache_fs/_ls-fromImg.log || true
              echo
              sudo -n umount /dev/loop7 || true
              sudo -n umount ./_local/cache.img || true
              sudo -n umount ./_local/cache_fs || true
              # Screenshot every 5s for the first 320 iterations, then every 30s.
              if ( [[ "$currentIterationWait" -le 320 ]] && [[ $(bc <<< "$currentIterationWait % 5") == 0 ]] ) || [[ $(bc <<< "$currentIterationWait % 30") == 0 ]]
              then
                #xwd -root -silent | convert xwd:- png:./_local/analysis/screenshots/qemu-01-"$currentIterationWait".png
                xwd -root -silent | convert xwd:- -quality 35 jpg:./_local/analysis/screenshots/qemu-01-"$currentIterationWait".jpg
                #jp2a --background=dark --colors --width=280 ./_local/analysis/screenshots/qemu-01-"$currentIterationWait".jpg
              fi
              let currentIterationWait=currentIterationWait+1
            done
            echo "currentIterationWait=""$currentIterationWait"
            # Hitting the budget means the guest never powered off: treat as failure.
            [[ "$currentIterationWait" -ge "$currentIterationMax" ]] && echo "bad: fail: run: poweroff" && currentExitStatus=1
            #sudo -n losetup -d /dev/loop7
            exit "$currentExitStatus"
          '
      # FIX(review): upload-artifact@v3 is deprecated and disabled by GitHub; @v4 accepts the same name/path inputs
      # (v4 requires unique artifact names per run — the two upload steps here already use distinct names).
      - name: artifacts
        uses: actions/upload-artifact@v4
        with:
          name: convert-live-exhaustive---analysis-screenshots-10-fromImg
          path: |
            ./_local/analysis/screenshots/*
      # ### ##### --> mount/umount -t btrfs ./_local/cache.img ./_local/cache_fs
      # Free space before mounting the cache disk for log extraction.
      - name: _rm
        shell: bash
        run: |
          ./ubiquitous_bash.sh _package_rm || true
          rm -f ./_local/vm.img || true
      - name: _export-fromImg
        shell: bash
        run: |
          lsmod | grep loop || true
          mkdir -p ./_local/cache_fs
          # Mounted read-write and intentionally left mounted for the following log steps.
          sudo -n mount -t btrfs ./_local/cache.img ./_local/cache_fs
          ls ./_local/cache_fs
          #sudo -n dd if=./_local/cache_fs/ubDistBuild/_local/vm.img bs=1M | dd of=./_local/vm.img bs=1M
      - name: artifacts
        uses: actions/upload-artifact@v4
        with:
          name: convert-live-exhaustive---analysis-log-10-fromImg
          path: |
            ./_local/cache_fs/*.log
      - name: ______________________________ _ls-fromImg.log
        shell: bash
        run: |
          cat ./_local/cache_fs/_ls-fromImg.log
      # PASS/FAIL: grep exits nonzero (failing the job) if any marker is missing from the guest log.
      - name: check
        shell: bash
        run: |
          grep regenerate ./_local/cache_fs/_ls-fromImg.log
          grep regenerate_rootGrab ./_local/cache_fs/_ls-fromImg.log
          grep lock_nvidia_autoinstall ./_local/cache_fs/_ls-fromImg.log