test: Add ostree image installation and upgrade test and add a new parallel stage for ostree test
parent 125fce92db
commit c1b0d348d6

4 changed files with 813 additions and 2 deletions
32 schutzbot/Jenkinsfile (vendored)
@@ -209,7 +209,18 @@ pipeline {
                        }
                    }
                }
                stage('F32 OSTree') {
                    agent { label "f32cloudbase && psi && x86_64" }
                    steps {
                        unstash 'fedora32'
                        run_tests('ostree')
                    }
                    post {
                        always {
                            preserve_logs('fedora32-ostree')
                        }
                    }
                }
                stage('EL8 Base') {
                    agent { label "rhel8cloudbase && x86_64 && aws" }
                    environment {
@@ -317,6 +328,18 @@ pipeline {
                        }
                    }
                }
                stage('EL8.3 OSTree') {
                    agent { label "rhel83cloudbase && psi && x86_64" }
                    steps {
                        unstash 'rhel83'
                        run_tests('ostree')
                    }
                    post {
                        always {
                            preserve_logs('rhel83-ostree')
                        }
                    }
                }
            }
        }
    }
@@ -375,6 +398,13 @@ void run_tests(test_type) {
        )
    }

    if (test_type == 'ostree') {
        sh (
            label: "OSTree tests",
            script: "test/image-tests/ostree.sh"
        )
    }

    if (test_type == 'integration') {
        // Run the qcow2 test.
        sh (
@ -16,4 +16,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDCv+bp9I2UqM4msF8D2deAp8s7LjdmH4zHZgioKFRc
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDjBCoSJv5HrA0PquLDJYU36oPI0H23AmU2gs8F3k6kBA71N9pHbs/C+YxFYyrsjLqaLwdo44sE2vXB5igJTZN/YpHYytbKHIGdnh3qc0myMvxDLJUxA1kxx3XfT8vbJ0AHrRIJdV6lxfrifI92a0vqRL1pAuWzW/NYytk6DeYDXbUy4xtHOlRIf2WXqZ2pZXsFleaYMdfmUP0y0awe5YDQsNkOBD2HDbU9669jP+UB0nirVuxDhZr1kZmQ8QdQxlePN5LN0RmbNY5Hm1/2SDuXhWh90/fbbWoCkrLTlkAUHckZPmGiEBhD2n4c7jHc25/fhaZsjZITRzHKP5wWW7LE3tXJVMF4eFEzgCoYEr6RpdEwQqQl86WiA0VuLQZbfju5T8t3y6PcGvXD5BF5BJuq710Uqw6y+gy4gNepO4U32jwBz2DDMMBhHsgjSmPfdTvqrjUnlt9NWTXnVDP34Zc5V+RwCgyB09gkwqW+Ae0jDoRSo1hTBSi7v9Gp5R/Hed0= msehnout-4@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRhju+UtbgUyUzE56Hr8fcRsy0lwk6SKDGn6M7TQ+B0EL5mMTmylbGZ4eP3h+II+GVF5DcoDfXolQvSVn6kSz5lSq/adlbAgGZdxd8Q6N1V+zmMejQf3ZpKQoFWFkmzQoHV+yJkMtWGz//RYqn6eHXvuyDxGyCsdEzBnt10VZL9/g+C45w8l5FlMdGHOhBIkZg1UDLL+2pu8/t4DuPskyr83Thrmb0eZBC4BLLS8Ob9BSZroUgDE1BoSGmfUVnt2Yipjj9qTJ1vWpZPGuqXyCtHHBneJA1b+Hu21CCX/FtSWBNgkbGW2l0rgl8py/GIjqYXPQG0UHkpg6RNlXDARb/GxJVRA2rILX7BsnndKlEh+Kw9Q/IgxpcR5RxZwbH1hYxX1igVyrRW2kAobJhazGV5gBfgm4tZ05dqBL6QX/PqTd6lC5jsyRStwCYIwkh0+Knl2WRagRK/matzvX6MmQXV/vArtY2S4sG73lWN/Af5PnWXQRHRTAyZdSbQC/Ti8c= msehnout-5@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDoVIpowzsQseMML/7I9LcA18G7quBjp2te6GHX9g2IrKh3PxssIS4jQSskRePaCl1yLWhTRDgADkDuOwd8SrQFy7eo7I/GD2wDcdLaWYy4GkdV1M9nhquqpbdLQ2m/3sRhqdHEHadGuw0iYdNgR0M5sNig+xL+G+G4MvjL7hcuaC7GUVbAFWQgePwFWLxjTmX2zmpKQMwk1r4n/i4Wj2GJ7CayxOc9qtnLdnZm9Tj3WA/aRLdbnlM0TCzXrtfO5OLVa9/n3AHY4qqZ1PmRCizLlLtvDeTjhM5qTYCk/xnUNCVnPhe+YqkqXlV6D/D7SI6EIiEd+GI+qJsHt9AgKZpr msehnout-6@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDY/ylCrPBzil4TnZR4tWULpz3QgfBMQyEnMOHDAJNp/FK70hD+PUiRm3UY96pmGXonQvqiDoyPuVh025FkWshPK91Dyq8QD8h25q5C5Cg6kMgBpdGzbX44ksms1KyOHmSZ48MpWw3PFOrlNP1vysr6Imjz9Jixmx4sOZvqKnrbsbOW04gowVzpZM8m048lvf6/KhqeImfeSRc9Rtpos8GqEQVlwRevE1qBON963V1QtFOrm9weoQgb369SdqRRdxaGNAymNh3d78DneOWXmEyBflLSpIDx5I2s/1NB1Dp95Bp3VvlV3CH1HC7LAFKYi+xsz3/KHdgtvgShX6LFSdsp rvykydal@dhcp-lab-144.englab.brq.redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDY/ylCrPBzil4TnZR4tWULpz3QgfBMQyEnMOHDAJNp/FK70hD+PUiRm3UY96pmGXonQvqiDoyPuVh025FkWshPK91Dyq8QD8h25q5C5Cg6kMgBpdGzbX44ksms1KyOHmSZ48MpWw3PFOrlNP1vysr6Imjz9Jixmx4sOZvqKnrbsbOW04gowVzpZM8m048lvf6/KhqeImfeSRc9Rtpos8GqEQVlwRevE1qBON963V1QtFOrm9weoQgb369SdqRRdxaGNAymNh3d78DneOWXmEyBflLSpIDx5I2s/1NB1Dp95Bp3VvlV3CH1HC7LAFKYi+xsz3/KHdgtvgShX6LFSdsp rvykydal@dhcp-lab-144.englab.brq.redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtJv3QKdqQ+0+jJND7bXVq9ux87yyi4qyJk7iOsX2VsgAUuYXpBf337p5yNB3N1kjOwGYSDjvDvS7GuhdatuvJI3/xzcyodbwJp32AT76e9uvUQHTBBGmUvBLzw3nk8ZDNp5d4rt2cZvlhv7lzDSt30DF14ivg5Xp/V0tK0BEfFlvYHuHheDeiSOQRQ392J7TefPQOW+JpxANU4Bxc1aHIettaIqQMWm9r4ZELd8M83IYt5Btp1bPsnfYywQMYqNXyDuhwhcsBTR5kVObP0DwxKZbMNPmA2lBvrX2GMIa+qfvKIW87KooaoPLt7CR7/DKfQ1S492L1wIwNUPUBLsQD xiaofwan@dhcp-8-203.nay.redhat.com
301 test/image-tests/check_ostree.yaml (new file)

@@ -0,0 +1,301 @@
---
- hosts: ostree_guest
  become: no
  vars:
    workspace: "{{ lookup('env', 'WORKSPACE') }}"
    total_counter: "0"
    failed_counter: "0"

  tasks:
    # case: check ostree commit correctly updated
    - name: get deployed ostree commit
      command: rpm-ostree status --json
      register: result_commit

    - name: make a json result
      set_fact:
        deploy_commit: "{{ result_commit.stdout | from_json | json_query('deployments[0].checksum') }}"

    - name: check commit deployed and built
      block:
        - assert:
            that:
              - deploy_commit == ostree_commit
            fail_msg: "deployed ostree commit is not commit built by osbuild-composer"
            success_msg: "successful building and deployment"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case from bug: https://bugzilla.redhat.com/show_bug.cgi?id=1848453
    - name: check ostree-remount status
      command: systemctl is-active ostree-remount.service
      register: result_remount

    - name: ostree-remount should be started
      block:
        - assert:
            that:
              - result_remount.stdout == "active"
            fail_msg: "ostree-remount is not started by default"
            success_msg: "starting ostree-remount successful"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /sysroot mount point
    - name: check /sysroot mount point
      command: findmnt -r -o SOURCE -n /sysroot
      register: result_sysroot_mount_point

    - name: /sysroot should be mounted on /dev/vda2
      block:
        - assert:
            that:
              - result_sysroot_mount_point.stdout == "/dev/vda2"
            fail_msg: "/sysroot is not mounted on /dev/vda2"
            success_msg: "/sysroot is mounted on /dev/vda2"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /sysroot mount status
    - name: check /sysroot mount status
      shell: findmnt -r -o OPTIONS -n /sysroot | awk -F "," '{print $1}'
      register: result_sysroot_mount_status

    - name: /sysroot should be mounted with rw permission
      block:
        - assert:
            that:
              - result_sysroot_mount_status.stdout == "rw"
            fail_msg: "/sysroot is not mounted with rw permission"
            success_msg: "/sysroot is mounted with rw permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /var mount point
    - name: check /var mount point
      command: findmnt -r -o SOURCE -n /var
      register: result_var_mount_point

    - name: "/var should be mounted on /dev/vda2[/ostree/deploy/{{ image_type }}/var]"
      block:
        - assert:
            that:
              - result_var_mount_point.stdout == "/dev/vda2[/ostree/deploy/{{ image_type }}/var]"
            fail_msg: "/var does not mount on /dev/vda2[/ostree/deploy/{{ image_type }}/var]"
            success_msg: "/var mounts on /dev/vda2[/ostree/deploy/{{ image_type }}/var]"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /var mount status
    - name: check /var mount status
      shell: findmnt -r -o OPTIONS -n /var | awk -F "," '{print $1}'
      register: result_var_mount_status

    - name: /var should be mounted with rw permission
      block:
        - assert:
            that:
              - result_var_mount_status.stdout == "rw"
            fail_msg: "/var is not mounted with rw permission"
            success_msg: "/var is mounted with rw permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /usr mount point
    - name: check /usr mount point
      command: findmnt -r -o SOURCE -n /usr
      register: result_usr_mount_point

    - name: "/usr should be mounted on /dev/vda2[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
      block:
        - assert:
            that:
              - result_usr_mount_point.stdout == "/dev/vda2[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
            fail_msg: "/usr does not mount on /dev/vda2[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
            success_msg: "/usr mounts on /dev/vda2[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /usr mount status
    - name: check /usr mount status
      shell: findmnt -r -o OPTIONS -n /usr | awk -F "," '{print $1}'
      register: result_usr_mount_status

    - name: /usr should be mounted with ro permission
      block:
        - assert:
            that:
              - result_usr_mount_status.stdout == "ro"
            fail_msg: "/usr is not mounted with ro permission"
            success_msg: "/usr is mounted with ro permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    - name: get the first 11 characters of the commit hash
      set_fact:
        commit_log: "{{ deploy_commit[:11] }}"

    # case: check wget installed after upgrade
    - name: check installed package
      shell: rpm -qa | sort
      register: result_packages

    - name: check wget installed
      block:
        - assert:
            that:
              - "'wget' in result_packages.stdout"
            fail_msg: "wget not installed, ostree upgrade might have failed"
            success_msg: "wget installed in ostree upgrade"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    - name: save installed package to log file
      copy:
        content: "{{ result_packages.stdout }}"
        dest: "{{ workspace }}/{{ commit_log }}.installed.ostree.log"
      delegate_to: localhost

    # case: check ostree-remount mount log
    - name: check ostree-remount mount log
      command: journalctl -u ostree-remount
      register: result_remount_journalctl

    - name: ostree-remount should remount /var and /sysroot
      block:
        - assert:
            that:
              - "'/sysroot' in result_remount_journalctl.stdout"
              - "'/var' in result_remount_journalctl.stdout"
            fail_msg: "/sysroot or /var are not remounted by ostree-remount"
            success_msg: "/sysroot and /var are remounted"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check dmesg error and failed log
    - name: check dmesg output
      command: dmesg
      register: result_dmesg

    - name: save dmesg output to log file
      copy:
        content: "{{ result_dmesg.stdout }}"
        dest: "{{ workspace }}/{{ commit_log }}.dmesg.ostree.log"
      delegate_to: localhost

    - name: check dmesg error and fail log
      shell: dmesg --notime | grep -i "error\|fail"
      register: result_dmesg_error

    - name: no more error or failed log
      block:
        - assert:
            that:
              - result_dmesg_error.stdout_lines | length == 2
              - "'pcieport 0000:00:01.6: Failed to check link status' in result_dmesg_error.stdout"
              - "'Error: Driver \\'pcspkr\\' is already registered, aborting' in result_dmesg_error.stdout"
            fail_msg: "more or fewer error and failure messages than expected"
            success_msg: "everything works as expected"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: ansible_facts['distribution'] == "RedHat"

    - name: no more error or failed log
      block:
        - assert:
            that:
              - result_dmesg_error.stdout_lines | length == 2
              - "'pcieport 0000:00:01.6: pciehp: Failed to check link status' in result_dmesg_error.stdout"
              - "'RAS: Correctable Errors collector initialized' in result_dmesg_error.stdout"
            fail_msg: "more or fewer error and failure messages than expected"
            success_msg: "everything works as expected"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: ansible_facts['distribution'] == "Fedora"

    # case: check running container with podman
    - name: run ubi8 image
      command: podman run ubi8-minimal:latest cat /etc/redhat-release
      register: podman_result

    - name: run container test
      block:
        - assert:
            that:
              - podman_result is succeeded
              - "'Red Hat Enterprise Linux release' in podman_result.stdout"
            fail_msg: "failed to run container with podman"
            success_msg: "running container with podman succeeded"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    - assert:
        that:
          - failed_counter == "0"
        fail_msg: "Ran {{ total_counter }} tests, but {{ failed_counter }} of them failed"
        success_msg: "All {{ total_counter }} tests passed"
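(Side note, not part of the change: the deploy_commit fact above is parsed out of rpm-ostree's JSON status. A rough shell equivalent of that json_query lookup, assuming jq is installed on the guest, would be:

    rpm-ostree status --json | jq -r '.deployments[0].checksum'

The playbook then compares that checksum against the ostree_commit variable passed in from ostree.sh below.)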
479 test/image-tests/ostree.sh (new executable file)

@@ -0,0 +1,479 @@
#!/bin/bash
set -euox pipefail

# Get OS data.
source /etc/os-release
ARCH=$(uname -m)

# Set os-variant and boot location used by virt-install.
case "${ID}-${VERSION_ID}" in
    # Bypass ostree test on fedora-31 and rhel 8.2
    "fedora-31")
        exit 0;;
    "rhel-8.2")
        exit 0;;
    "fedora-32")
        IMAGE_TYPE=fedora-iot-commit
        OSTREE_REF="fedora/32/${ARCH}/iot"
        OS_VARIANT="fedora32"
        BOOT_LOCATION="https://mirrors.rit.edu/fedora/fedora/linux/releases/32/Everything/x86_64/os/";;
    "rhel-8.3")
        IMAGE_TYPE=rhel-edge-commit
        OSTREE_REF="rhel/8/${ARCH}/edge"
        OS_VARIANT="rhel8-unknown"
        BOOT_LOCATION="http://download.devel.redhat.com/rhel-8/nightly/RHEL-8/latest-RHEL-8.3/compose/BaseOS/x86_64/os/";;
    *) ;;
esac


# Colorful output.
function greenprint {
    echo -e "\033[1;32m${1}\033[0m"
}

# EPEL is only needed on RHEL for some of the packages installed below.
if [[ $ID == rhel ]] && ! rpm -q epel-release; then
    greenprint "📦 Setting up EPEL repository"
    curl -Ls --retry 5 --output /tmp/epel.rpm \
        https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
    sudo rpm -Uvh /tmp/epel.rpm
fi

# Install required packages.
greenprint "📦 Installing required packages"
sudo dnf -y install jq libvirt-client libvirt-daemon \
    libvirt-daemon-config-network libvirt-daemon-config-nwfilter \
    libvirt-daemon-driver-interface libvirt-daemon-driver-network \
    libvirt-daemon-driver-nodedev libvirt-daemon-driver-nwfilter \
    libvirt-daemon-driver-qemu libvirt-daemon-driver-secret \
    libvirt-daemon-driver-storage libvirt-daemon-driver-storage-disk \
    libvirt-daemon-kvm qemu-img qemu-kvm virt-install expect \
    python3-lxml ansible httpd

# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null

# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
  <name>integration</name>
  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <bridge name='integration' stp='on' delay='0'/>
  <mac address='52:54:00:36:46:ef'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254'/>
      <host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
    </dhcp>
  </ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
    sudo virsh net-define /tmp/integration.xml
    sudo virsh net-start integration
fi

# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
    WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
    if (action.id == "org.libvirt.unix.manage" &&
        subject.isInGroup("${WHEEL_GROUP}")) {
            return polkit.Result.YES;
    }
});
EOF

# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY=osbuild-composer-ostree-test-${TEST_UUID}
GUEST_ADDRESS=192.168.100.50

# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
KS_FILE=${TEMPDIR}/ks.cfg
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json

# SSH setup.
SSH_OPTIONS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5"
SSH_KEY=${WORKSPACE}/test/keyring/id_rsa
chmod 0600 $SSH_KEY

# Get the compose log.
get_compose_log () {
    COMPOSE_ID=$1
    LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log

    # Download the logs.
    sudo composer-cli compose log $COMPOSE_ID | tee $LOG_FILE > /dev/null
}

# Get the compose metadata.
get_compose_metadata () {
    COMPOSE_ID=$1
    METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json

    # Download the metadata.
    sudo composer-cli compose metadata $COMPOSE_ID > /dev/null

    # Find the tarball and extract it.
    TARBALL=$(basename $(find . -maxdepth 1 -type f -name "*-metadata.tar"))
    tar -xf $TARBALL -C ${TEMPDIR}
    rm -f $TARBALL

    # Move the JSON file into place.
    cat ${TEMPDIR}/${COMPOSE_ID}.json | jq -M '.' | tee $METADATA_FILE > /dev/null
}

# Build ostree image.
build_image() {
    blueprint_file=$1
    blueprint_name=$2

    # Prepare the blueprint for the compose.
    greenprint "📋 Preparing blueprint"
    sudo composer-cli blueprints push $blueprint_file
    sudo composer-cli blueprints depsolve $blueprint_name

    # Get worker unit file so we can watch the journal.
    WORKER_UNIT=$(sudo systemctl list-units | egrep -o "osbuild.*worker.*\.service")
    sudo journalctl -af -n 1 -u ${WORKER_UNIT} &
    WORKER_JOURNAL_PID=$!

    # Start the compose.
    greenprint "🚀 Starting compose"
    if [[ $blueprint_name == upgrade ]]; then
        if [[ "${ID}-${VERSION_ID}" == rhel-8.3 ]]; then
            sudo composer-cli --json compose start-ostree --ref $OSTREE_REF --parent $COMMIT_HASH $blueprint_name $IMAGE_TYPE | tee $COMPOSE_START
        else
            sudo composer-cli --json compose start-ostree $blueprint_name $IMAGE_TYPE $OSTREE_REF $COMMIT_HASH | tee $COMPOSE_START
        fi
    else
        sudo composer-cli --json compose start $blueprint_name $IMAGE_TYPE | tee $COMPOSE_START
    fi
    COMPOSE_ID=$(jq -r '.build_id' $COMPOSE_START)

    # Wait for the compose to finish.
    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
    while true; do
        sudo composer-cli --json compose info ${COMPOSE_ID} | tee $COMPOSE_INFO > /dev/null
        COMPOSE_STATUS=$(jq -r '.queue_status' $COMPOSE_INFO)

        # Is the compose finished?
        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
            break
        fi

        # Wait 5 seconds and try again.
        sleep 5
    done

    # Capture the compose logs from osbuild.
    greenprint "💬 Getting compose log and metadata"
    get_compose_log $COMPOSE_ID
    get_compose_metadata $COMPOSE_ID

    # Did the compose finish with success?
    if [[ $COMPOSE_STATUS != FINISHED ]]; then
        echo "Something went wrong with the compose. 😢"
        exit 1
    fi

    # Stop watching the worker journal.
    sudo kill ${WORKER_JOURNAL_PID}
}

# Wait for the ssh server to be up.
wait_for_ssh_up () {
    SSH_STATUS=$(ssh $SSH_OPTIONS -i ${SSH_KEY} admin@${1} '/bin/bash -c "echo -n READY"')
    if [[ $SSH_STATUS == READY ]]; then
        echo 1
    else
        echo 0
    fi
}

# Clean up our mess.
clean_up () {
    greenprint "🧼 Cleaning up"
    sudo virsh destroy ${IMAGE_KEY}
    if [[ $ARCH == aarch64 ]]; then
        sudo virsh undefine ${IMAGE_KEY} --nvram
    else
        sudo virsh undefine ${IMAGE_KEY}
    fi
    # Remove qcow2 file.
    sudo rm -f $LIBVIRT_IMAGE_PATH
    # Remove extracted upgrade image-tar.
    sudo rm -rf $UPGRADE_PATH
    # Remove "remote" repo.
    sudo rm -rf ${HTTPD_PATH}/{repo,compose.json}
    # Remove tmp dir.
    sudo rm -rf $TEMPDIR
    # Stop httpd
    sudo systemctl disable httpd --now
}

# Test result checking
check_result () {
    greenprint "Checking for test result"
    if [[ $RESULTS == 1 ]]; then
        greenprint "💚 Success"
    else
        greenprint "❌ Failed"
        clean_up
        exit 1
    fi
}

##################################################
##
## ostree image/commit installation
##
##################################################

# Write a blueprint for ostree image.
tee $BLUEPRINT_FILE > /dev/null << EOF
name = "ostree"
description = "A base ostree image"
version = "0.0.1"
modules = []
groups = []

[[packages]]
name = "python36"
version = "*"
EOF

# Build installation image.
build_image $BLUEPRINT_FILE ostree

# Start httpd to serve ostree repo.
greenprint "🚀 Starting httpd daemon"
sudo systemctl start httpd

# Download the image and extract tar into web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image ${COMPOSE_ID} > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
HTTPD_PATH="/var/www/html"
sudo tar -xf ${IMAGE_FILENAME} -C ${HTTPD_PATH}
sudo rm -f $IMAGE_FILENAME

# Clean compose and blueprints.
greenprint "Clean up osbuild-composer"
sudo composer-cli compose delete ${COMPOSE_ID} > /dev/null
sudo composer-cli blueprints delete ostree > /dev/null

# Get ostree commit value.
greenprint "Get ostree image commit value"
COMMIT_HASH=$(jq -r '."ostree-commit"' < ${HTTPD_PATH}/compose.json)

# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/

# Create qcow2 file for virt install.
greenprint "Create qcow2 file for virt install"
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.qcow2
sudo qemu-img create -f qcow2 ${LIBVIRT_IMAGE_PATH} 20G

# Write kickstart file for ostree image installation.
greenprint "Generate kickstart file"
tee $KS_FILE > /dev/null << STOPHERE
text
lang en_US.UTF-8
keyboard us
timezone --utc Etc/UTC

selinux --enforcing
rootpw --lock --iscrypted locked
user --name=admin --groups=wheel --iscrypted --password=\$6\$1LgwKw9aOoAi/Zy9\$Pn3ErY1E8/yEanJ98evqKEW.DZp24HTuqXPJl6GYCm8uuobAmwxLv7rGCvTRZhxtcYdmC0.XnYRSR9Sh6de3p0
sshkey --username=admin "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC61wMCjOSHwbVb4VfVyl5sn497qW4PsdQ7Ty7aD6wDNZ/QjjULkDV/yW5WjDlDQ7UqFH0Sr7vywjqDizUAqK7zM5FsUKsUXWHWwg/ehKg8j9xKcMv11AkFoUoujtfAujnKODkk58XSA9whPr7qcw3vPrmog680pnMSzf9LC7J6kXfs6lkoKfBh9VnlxusCrw2yg0qI1fHAZBLPx7mW6+me71QZsS6sVz8v8KXyrXsKTdnF50FjzHcK9HXDBtSJS5wA3fkcRYymJe0o6WMWNdgSRVpoSiWaHHmFgdMUJaYoCfhXzyl7LtNb3Q+Sveg+tJK7JaRXBLMUllOlJ6ll5Hod root@localhost"

bootloader --timeout=1 --append="net.ifnames=0 modprobe.blacklist=vc4"

network --bootproto=dhcp --device=link --activate --onboot=on

zerombr
clearpart --all --initlabel --disklabel=msdos
autopart --nohome --noswap --type=plain
ostreesetup --nogpg --osname=${IMAGE_TYPE} --remote=${IMAGE_TYPE} --url=http://192.168.100.1/repo/ --ref=${OSTREE_REF}
poweroff

%post --log=/var/log/anaconda/post-install.log --erroronfail

# no sudo password for user admin
echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers

# Remove any persistent NIC rules generated by udev
rm -vf /etc/udev/rules.d/*persistent-net*.rules
# And ensure that we will do DHCP on eth0 on startup
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF
DEVICE="eth0"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"
PERSISTENT_DHCLIENT="yes"
EOF

echo "Packages within this iot or edge image:"
echo "-----------------------------------------------------------------------"
rpm -qa | sort
echo "-----------------------------------------------------------------------"
# Note that running rpm recreates the rpm db files which aren't needed/wanted
rm -f /var/lib/rpm/__db*

echo "Zeroing out empty space."
# This forces the filesystem to reclaim space from deleted files
dd bs=1M if=/dev/zero of=/var/tmp/zeros || :
rm -f /var/tmp/zeros
echo "(Don't worry -- that out-of-space error was expected.)"

%end
STOPHERE

# Install ostree image via anaconda.
greenprint "Install ostree image via anaconda"
sudo virt-install --initrd-inject=${KS_FILE} \
                  --extra-args="ks=file:/ks.cfg console=ttyS0,115200" \
                  --name=${IMAGE_KEY} \
                  --disk path=${LIBVIRT_IMAGE_PATH},format=qcow2 \
                  --ram 3072 \
                  --vcpus 2 \
                  --network network=integration,mac=34:49:22:B0:83:30 \
                  --os-type linux \
                  --os-variant ${OS_VARIANT} \
                  --location ${BOOT_LOCATION} \
                  --nographics \
                  --noautoconsole \
                  --wait=-1 \
                  --noreboot

# Start VM.
greenprint "Start VM"
sudo virsh start ${IMAGE_KEY}

# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in `seq 0 30`; do
    RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check image installation result
check_result

##################################################
##
## ostree image/commit upgrade
##
##################################################

# Write a blueprint for ostree image.
tee $BLUEPRINT_FILE > /dev/null << EOF
name = "upgrade"
description = "An upgrade ostree image"
version = "0.0.2"
modules = []
groups = []

[[packages]]
name = "python36"
version = "*"

[[packages]]
name = "wget"
version = "*"
EOF

# Build upgrade image.
build_image $BLUEPRINT_FILE upgrade

# Download the image and extract tar into web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image ${COMPOSE_ID} > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
UPGRADE_PATH="$(pwd)/upgrade"
mkdir -p $UPGRADE_PATH
sudo tar -xf $IMAGE_FILENAME -C $UPGRADE_PATH
sudo rm -f $IMAGE_FILENAME

# Clean compose and blueprints.
greenprint "Clean up osbuild-composer again"
sudo composer-cli compose delete ${COMPOSE_ID} > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null

# Introduce new ostree commit into repo.
greenprint "Introduce new ostree commit into repo"
sudo ostree pull-local --repo "${HTTPD_PATH}/repo" "${UPGRADE_PATH}/repo" $OSTREE_REF
sudo ostree summary --update --repo "${HTTPD_PATH}/repo"

# Ensure SELinux is happy with all object files.
greenprint "👿 Running restorecon on web server root folder"
sudo restorecon -Rv "${HTTPD_PATH}/repo" > /dev/null

# Get ostree commit value.
greenprint "Get ostree image commit value"
UPGRADE_HASH=$(jq -r '."ostree-commit"' < ${UPGRADE_PATH}/compose.json)

# Upgrade image/commit.
greenprint "Upgrade ostree image/commit"
ssh $SSH_OPTIONS -i ${SSH_KEY} admin@${GUEST_ADDRESS} 'sudo rpm-ostree upgrade'
ssh $SSH_OPTIONS -i ${SSH_KEY} admin@${GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'

# Sleep 10 seconds to give the VM time to reboot.
sleep 10

# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in `seq 0 30`; do
    RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check ostree upgrade result
check_result

# Add the instance IP address to the Ansible inventory.
sudo tee ${TEMPDIR}/inventory > /dev/null << EOF
[ostree_guest]
${GUEST_ADDRESS}

[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=admin
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF

# Test IoT/Edge OS
ansible-playbook -v -i ${TEMPDIR}/inventory -e image_type=${IMAGE_TYPE} -e ostree_commit=${UPGRADE_HASH} $(dirname $0)/check_ostree.yaml || RESULTS=0
check_result

# Final success clean up
clean_up

exit 0
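(Side note, not part of the change: the new Jenkins stages execute this script as test/image-tests/ostree.sh from the repository checkout, with WORKSPACE provided by Jenkins. A rough sketch of a manual run, assuming a host that already has osbuild-composer and composer-cli set up and the osbuild-composer checkout as the current directory:

    export WORKSPACE=$(pwd)
    test/image-tests/ostree.sh

WORKSPACE has to point at the checkout because the script reads its SSH private key from ${WORKSPACE}/test/keyring/id_rsa and writes its compose logs under ${WORKSPACE}.)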