# Ansible playbook: post-deployment verification checks for an ostree-based
# (RHEL for Edge / Fedora IoT) guest. Extraction artifacts removed.
---
# Verify an ostree-based edge guest after install/upgrade: boot health,
# mounts, greenboot, podman, FDO, and rollback behavior.
- hosts: ostree_guest
  become: false
  vars:
    workspace: "{{ lookup('env', 'WORKSPACE') }}"
    # NOTE: the flag vars below are deliberately quoted strings ("true"/"false"),
    # not booleans — every `when:` in this play compares them with string
    # equality (e.g. `fdo_credential == "true"`). Do not convert to booleans.
    skip_rollback_test: "false"
    fdo_credential: "false"
    edge_type: "none"
    # spelling kept as-is: this name is part of the external interface
    # (set by callers via --extra-vars)
    embeded_container: "false"
    # running pass/fail tallies, incremented as strings via `| int + 1`
    total_counter: "0"
    failed_counter: "0"
    firewall_feature: "false"
    ignition: "false"
    test_custom_dirs_files: "false"
    sysroot_ro: "false"
    fips: "false"

  tasks:
# current target host's IP address
|
|
- debug: var=ansible_all_ipv4_addresses
|
|
- debug: var=ansible_facts['distribution_version']
|
|
- debug: var=ansible_facts['distribution']
|
|
- debug: var=ansible_facts['architecture']
|
|
|
|
# check BIOS or UEFI
|
|
- name: check bios or uefi
|
|
stat:
|
|
path: /sys/firmware/efi
|
|
ignore_errors: yes
|
|
|
|
# check secure boot status if it's enabled
|
|
- name: check secure boot status
|
|
command: mokutil --sb-state
|
|
ignore_errors: yes
|
|
|
|
# check tpm device
|
|
- name: check tpm device
|
|
stat:
|
|
path: /dev/tpm0
|
|
ignore_errors: yes
|
|
when: fdo_credential == "true"
|
|
|
|
- name: check partition size
|
|
command: df -h
|
|
ignore_errors: yes
|
|
become: yes
|
|
|
|
- name: check disk partition table
|
|
command: fdisk -l
|
|
ignore_errors: yes
|
|
become: yes
|
|
|
|
- name: check rpm-ostree status
|
|
command: rpm-ostree status
|
|
ignore_errors: yes
|
|
|
|
# default kernel or rt kernel
|
|
- name: check installed kernel
|
|
command: uname -r
|
|
register: result_kernel
|
|
|
|
# case: check rt kernel installed (rt kernel only)
|
|
- name: check rt kernel installed
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'rt' in result_kernel.stdout"
|
|
fail_msg: "rt kernel not installed, ostree upgrade might be failed"
|
|
success_msg: "rt kernel installed in ostree upgrade"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: "'rt' in result_kernel.stdout"
|
|
|
|
- name: check system FIPS mode
|
|
block:
|
|
- name: run 'fips-mode-setup --check'
|
|
command: fips-mode-setup --check
|
|
register: fips_mode_setup
|
|
|
|
- name: check 'fips-mode-setup --check' output
|
|
assert:
|
|
that:
|
|
- "'FIPS mode is enabled' in fips_mode_setup.stdout"
|
|
- "'FIPS mode is disabled' not in fips_mode_setup.stdout"
|
|
- "'Installation of FIPS modules is not completed' not in fips_mode_setup.stdout"
|
|
- "'Inconsistent state detected' not in fips_mode_setup.stdout"
|
|
fail_msg: "FIPS mode not enabled"
|
|
when: fips == "true"
|
|
|
|
# first installed or upgraded
|
|
# first installed has one commit, but upgraded has two
|
|
- name: determin which stage the checking is running on
|
|
shell: rpm-ostree status --json | jq '.deployments | length'
|
|
register: result_stage
|
|
|
|
- set_fact:
|
|
checking_stage: "{{ result_stage.stdout }}"
|
|
|
|
- name: check Ignition has run and the config was provided correctly
|
|
block:
|
|
- name: check user provided config
|
|
shell: cat /etc/.ignition-result.json | jq '.userConfigProvided'
|
|
register: user_provided_config
|
|
- assert:
|
|
that:
|
|
- user_provided_config.stdout == "true"
|
|
fail_msg: "no ign user provided config"
|
|
success_msg: "ignition has run with user provided config"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: Gather Ignition logs
|
|
shell: journalctl --identifier=ignition --all
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: ignition == "true" and ((ansible_facts['distribution'] == 'RedHat' and ansible_facts['distribution_version'] is version('9.2', '>=')) or
|
|
(ansible_facts['distribution'] == 'CentOS' and (ansible_facts['distribution_version'] == '9')))
|
|
|
|
- name: check systemd service correctly started on firstboot
|
|
block:
|
|
|
|
- name: check hello.service logs
|
|
command: journalctl -b -0 -u hello.service
|
|
register: result_hello_service_log
|
|
|
|
- assert:
|
|
that:
|
|
- "'Hello, World!' in result_hello_service_log.stdout"
|
|
fail_msg: "hello.service doesn't have the correct log"
|
|
success_msg: "hello.service started and working"
|
|
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: ignition == "true" and ((ansible_facts['distribution'] == 'RedHat' and ansible_facts['distribution_version'] is version('9.2', '>=')) or
|
|
(ansible_facts['distribution'] == 'CentOS' and (ansible_facts['distribution_version'] == '9')))
|
|
|
|
- name: wait for FDO onboarding
|
|
block:
|
|
- wait_for:
|
|
path: "/etc/device-credentials"
|
|
delay: 10
|
|
timeout: 600
|
|
state: present
|
|
msg: "FDO onboarding credentials not created"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
# TODO: gather fdo-client-linuxapp.service logs
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: fdo_credential == "true"
|
|
|
|
# case: check ostree commit correctly updated
|
|
- name: get deployed ostree commit
|
|
shell: rpm-ostree status --json | jq -r '.deployments[0].checksum'
|
|
register: result_commit
|
|
|
|
- name: make a json result
|
|
set_fact:
|
|
deploy_commit: "{{ result_commit.stdout }}"
|
|
|
|
- name: check commit deployed and built
|
|
block:
|
|
- assert:
|
|
that:
|
|
- deploy_commit == ostree_commit
|
|
fail_msg: "deployed ostree commit is not commit built by osbuild-composer"
|
|
success_msg: "successful building and deployment"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
# case: check ostree ref
|
|
- name: check ostree ref
|
|
shell: rpm-ostree status --json | jq -r '.deployments[0].origin'
|
|
register: result_ref
|
|
|
|
- name: check ostree ref deployed
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_ref.stdout == ostree_ref
|
|
fail_msg: "deployed ostree ref failed"
|
|
success_msg: "ostree ref successful building and deployment"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: (ostree_ref is defined) and (ostree_ref|length > 0)
|
|
|
|
# case from bug: https://bugzilla.redhat.com/show_bug.cgi?id=1848453
|
|
- name: check ostree-remount status
|
|
command: systemctl is-active ostree-remount.service
|
|
register: result_remount
|
|
|
|
- name: ostree-remount should be started
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_remount.stdout == "active"
|
|
fail_msg: "ostree-remount is not started by default"
|
|
success_msg: "starting ostree-remount successful"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
- name: set mount point device name
|
|
command: findmnt -r -o SOURCE -n /sysroot
|
|
register: result_sysroot_source
|
|
|
|
- set_fact:
|
|
device_name: "{{ result_sysroot_source.stdout }}"
|
|
|
|
# case: check pv format
|
|
- name: check pv format
|
|
shell: pvs --reportformat json | jq .report[0].pv[0].pv_fmt -r
|
|
become: yes
|
|
register: result_pv_fmt
|
|
when: "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
|
|
- name: "pv format should be lvm2"
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_pv_fmt.stdout == "lvm2"
|
|
fail_msg: "pv format is not lvm2"
|
|
success_msg: "pv format is lvm2"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
|
|
# case: check pv size
|
|
- name: check pv size
|
|
shell: pvs --reportformat json | jq .report[0].pv[0].pv_size -r
|
|
become: yes
|
|
register: result_pv_size
|
|
when: "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
|
|
# simplified installer uses coreos-installer to grow fs to 19G
|
|
- name: "pv size should bigger than 19G for simplified installer"
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'19' in result_pv_size.stdout"
|
|
fail_msg: "pv size is not bigger than 19G"
|
|
success_msg: "pv size is bigger than 19G"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when:
|
|
- "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
- fdo_credential == "true"
|
|
|
|
# raw image does not have coreos-installer to grow fs to 19G
|
|
- name: "pv size should keep at 9G for raw image"
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'9' in result_pv_size.stdout"
|
|
fail_msg: "pv size does not keep at 9G"
|
|
success_msg: "pv size keeps at 9G"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when:
|
|
- "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
- fdo_credential == "false"
|
|
|
|
# case: check /sysroot lv size
|
|
- name: check sysroot lv size
|
|
shell: df -h | grep sysroot
|
|
register: result_sysroot_lv_size
|
|
when: "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
|
|
- name: "/sysroot lv size should be 9G"
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'9.0G' in result_sysroot_lv_size.stdout"
|
|
fail_msg: "pv size is not 9G"
|
|
success_msg: "pv size is 9G"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: "'/dev/mapper/rootvg-rootlv' in result_sysroot_source.stdout"
|
|
|
|
# case: check /sysroot mount status
|
|
- name: check /sysroot mount status
|
|
shell: findmnt -r -o OPTIONS -n /sysroot | awk -F "," '{print $1}'
|
|
register: result_sysroot_mount_status
|
|
|
|
- name: /sysroot should be mount with rw permission
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_sysroot_mount_status.stdout == "rw"
|
|
fail_msg: "/sysroot is not mounted with rw permission"
|
|
success_msg: "/sysroot is mounted with rw permission"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: sysroot_ro == "false"
|
|
|
|
# https://fedoraproject.org/wiki/Changes/Silverblue_Kinoite_readonly_sysroot
|
|
- name: /sysroot should be mount with ro permission on RHEL 9.2 , Centos9 and Fedora 37 above
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_sysroot_mount_status.stdout == "ro"
|
|
fail_msg: "/sysroot is not mounted with ro permission"
|
|
success_msg: "/sysroot is mounted with ro permission"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: sysroot_ro == "true"
|
|
|
|
# case: check /var mount point
|
|
- name: check /var mount point
|
|
command: findmnt -r -o SOURCE -n /var
|
|
register: result_var_mount_point
|
|
|
|
- name: "/var should be mounted on {{ device_name }}[/ostree/deploy/{{ image_type }}/var]"
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_var_mount_point.stdout == var_mount_path
|
|
fail_msg: "/var does not mount on {{ var_mount_path }}"
|
|
success_msg: "/var mounts on {{ var_mount_path }}"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
vars:
|
|
var_mount_path: "{{ device_name }}[/ostree/deploy/{{ image_type }}/var]"
|
|
|
|
# case: check /var mount status
|
|
- name: check /var mount status
|
|
shell: findmnt -r -o OPTIONS -n /var | awk -F "," '{print $1}'
|
|
register: result_var_mount_status
|
|
|
|
- name: /var should be mount with rw permission
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_var_mount_status.stdout == "rw"
|
|
fail_msg: "/var is not mounted with rw permission"
|
|
success_msg: "/var is mounted with rw permission"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
# case: check /usr mount point
|
|
- name: check /usr mount point
|
|
command: findmnt -r -o SOURCE -n /usr
|
|
register: result_usr_mount_point
|
|
|
|
- name: "/usr should be mounted on {{ device_name }}[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_usr_mount_point.stdout == usr_mount_path
|
|
fail_msg: "/usr does not mount on {{ usr_mount_path }}"
|
|
success_msg: "/usr mounts on {{ usr_mount_path }}"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
vars:
|
|
usr_mount_path: "{{ device_name }}[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
|
|
|
|
# case: check /usr mount status
|
|
- name: check /usr mount status
|
|
shell: findmnt -r -o OPTIONS -n /usr | awk -F "," '{print $1}'
|
|
register: result_usr_mount_status
|
|
|
|
- name: /usr should be mount with rw permission
|
|
block:
|
|
- assert:
|
|
that:
|
|
- result_usr_mount_status.stdout == "ro"
|
|
fail_msg: "/usr is not mounted with ro permission"
|
|
success_msg: "/usr is mounted with ro permission"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
- name: get the first 10 chars in commit hash
|
|
set_fact:
|
|
commit_log: "{{ deploy_commit[:11] }}"
|
|
|
|
# case: check wget installed after upgrade
|
|
- name: check installed package
|
|
shell: rpm -qa | sort
|
|
register: result_packages
|
|
|
|
- name: check wget installed
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'wget' in result_packages.stdout"
|
|
fail_msg: "wget not installed, ostree upgrade might be failed"
|
|
success_msg: "wget installed in ostree upgrade"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: checking_stage == "2"
|
|
|
|
- name: save installed package to log file
|
|
copy:
|
|
content: "{{ result_packages.stdout }}"
|
|
dest: "{{ workspace }}/{{ commit_log }}.installed.ostree.log"
|
|
delegate_to: localhost
|
|
|
|
# case: check ostree-remount mount log
|
|
- name: check ostree-remount mount log
|
|
command: journalctl -u ostree-remount
|
|
register: result_remount_jounalctl
|
|
|
|
- name: ostree-remount should remount /var and /sysroot
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'/sysroot' in result_remount_jounalctl.stdout"
|
|
- "'/var' in result_remount_jounalctl.stdout"
|
|
fail_msg: "/sysroot or /var are not remounted by ostree-remount"
|
|
success_msg: "/sysroot and /var are remount"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
# Skipping playbook task in CS9 and RHEL 9 due to bug https://issues.redhat.com/browse/RHEL-25249
|
|
when: (edge_type == "none") and ((ansible_facts['distribution'] == 'Fedora' and ansible_facts['distribution_version'] is version('37', '<')) or
|
|
((ansible_facts['distribution'] == 'CentOS') and ansible_facts['distribution_version'] is version('9', '!=')) or (ansible_facts['distribution'] != 'RedHat'))
|
|
|
|
# case: check dmesg error and failed log
|
|
- name: check dmesg output
|
|
command: dmesg
|
|
become: yes
|
|
register: result_dmesg
|
|
|
|
- name: save dmesg output to log file
|
|
copy:
|
|
content: "{{ result_dmesg.stdout }}"
|
|
dest: "{{ workspace }}/{{ commit_log }}.dmesg.ostree.log"
|
|
delegate_to: localhost
|
|
|
|
- name: check dmesg error and fail log
|
|
shell: dmesg --notime | grep -i "error\|fail" || true
|
|
register: result_dmesg_error
|
|
become: yes
|
|
|
|
- name: check embeded container image with podman
|
|
command: podman images
|
|
become: yes
|
|
register: result_podman_images
|
|
when: embeded_container == "true"
|
|
|
|
- name: embded container should be listed by podman images
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'quay.io/fedora/fedora' in result_podman_images.stdout"
|
|
fail_msg: "fedora image is not built in image"
|
|
success_msg: "fedora image is built in image"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: embeded_container == "true"
|
|
|
|
- name: embedded fedora-minimal container image should be listed by podman images
|
|
block:
|
|
- assert:
|
|
that:
|
|
- "'localhost/fedora-minimal' in result_podman_images.stdout"
|
|
fail_msg: "fedora-minimal image is not built in image"
|
|
success_msg: "fedora-minimal image is built in image"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: embeded_container == "true"
|
|
|
|
# case: check running container with podman
|
|
- name: run ubi8 image
|
|
command: podman run registry.access.redhat.com/ubi8/ubi-minimal:latest cat /etc/redhat-release
|
|
register: podman_result
|
|
become: yes
|
|
retries: 30 # due to https://github.com/osbuild/osbuild-composer/issues/2492
|
|
delay: 2
|
|
until: podman_result is success
|
|
ignore_errors: yes # due to https://bugzilla.redhat.com/show_bug.cgi?id=1903983
|
|
|
|
- name: run container test
|
|
block:
|
|
- assert:
|
|
that:
|
|
- podman_result is succeeded
|
|
- "'Red Hat Enterprise Linux release' in podman_result.stdout"
|
|
fail_msg: "failed run container with podman"
|
|
success_msg: "running container with podman successed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
# case: check running container with podman (non-root)
|
|
- name: run ubi8 image with non-root
|
|
command: podman run ubi8:latest cat /etc/redhat-release
|
|
register: podman_result
|
|
retries: 30 # due to https://github.com/osbuild/osbuild-composer/issues/2492
|
|
delay: 2
|
|
until: podman_result is success
|
|
ignore_errors: yes
|
|
|
|
- name: run container test
|
|
block:
|
|
- assert:
|
|
that:
|
|
- podman_result is succeeded
|
|
- "'Red Hat Enterprise Linux release' in podman_result.stdout"
|
|
fail_msg: "failed run container with podman (non-root)"
|
|
success_msg: "running container with podman (non-root) successed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
# case: check fedora-minimal container with podman
|
|
- name: run fedora-minimal image
|
|
command: podman run localhost/fedora-minimal@sha256:4d76a7480ce1861c95975945633dc9d03807ffb45c64b664ef22e673798d414b cat /etc/os-release
|
|
register: fminimal_container_result
|
|
become: yes
|
|
retries: 30 # due to https://github.com/osbuild/osbuild-composer/issues/2492
|
|
delay: 2
|
|
until: fminimal_container_result is success
|
|
ignore_errors: yes # due to https://bugzilla.redhat.com/show_bug.cgi?id=1903983
|
|
when: embeded_container == "true"
|
|
|
|
- name: run fedora-minimal container test
|
|
block:
|
|
- assert:
|
|
that:
|
|
- fminimal_container_result is succeeded
|
|
- "'Fedora Linux 36 (Container Image)' in fminimal_container_result.stdout"
|
|
- "'Trying to pull' not in fminimal_container_result.stdout"
|
|
fail_msg: "failed run fedora-minimal container with podman"
|
|
success_msg: "running fedora-minimal container with podman successed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: embeded_container == "true"
|
|
|
|
# case: check dnf package and it should not be installed
|
|
# https://github.com/osbuild/osbuild-composer/blob/master/internal/distro/rhel8/distro.go#L642
|
|
- name: dnf should not be installed
|
|
block:
|
|
- name: dnf should not be installed
|
|
shell: rpm -qa | grep dnf || echo -n PASS
|
|
register: result_dnf
|
|
|
|
- assert:
|
|
that:
|
|
- result_dnf.stdout == "PASS"
|
|
fail_msg: "dnf is installed"
|
|
success_msg: "No dnf installed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: ansible_facts['distribution'] != 'RedHat' and ansible_facts['distribution_version'] is version('8.5', '!=')
|
|
|
|
# case: check installed greenboot packages
|
|
# https://github.com/osbuild/osbuild-composer/blob/master/internal/distro/rhel8/distro.go#L634
|
|
- name: greenboot and it's related packages should be installed (RHEL 8.6+, CS8, CS9 and Fedora 36+)
|
|
block:
|
|
- name: greenboot and it's related packages should be installed
|
|
shell: rpm -qa | grep greenboot
|
|
register: result_greenboot_packages
|
|
|
|
- assert:
|
|
that:
|
|
- "'greenboot-0' in result_greenboot_packages.stdout"
|
|
- "'greenboot-default-health-checks' in result_greenboot_packages.stdout"
|
|
fail_msg: "Some of greenboot and its related packages are not installed"
|
|
success_msg: "All greenboot and its related packages are installed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: (ansible_facts['distribution'] == 'RedHat' and ansible_facts['distribution_version'] is version('8.6', '>=')) or
|
|
(ansible_facts['distribution'] == 'CentOS') or
|
|
(ansible_facts['distribution'] == 'Fedora' and ansible_facts['distribution_version'] is version('36', '>='))
|
|
|
|
- name: greenboot and it's related packages should be installed (RHEL 8.5)
|
|
block:
|
|
- name: greenboot and it's related packages should be installed
|
|
shell: rpm -qa | grep greenboot
|
|
register: result_greenboot_packages
|
|
|
|
- assert:
|
|
that:
|
|
- "'greenboot-0' in result_greenboot_packages.stdout"
|
|
- "'greenboot-grub2' in result_greenboot_packages.stdout"
|
|
- "'greenboot-rpm-ostree-grub2' in result_greenboot_packages.stdout"
|
|
- "'greenboot-reboot' in result_greenboot_packages.stdout"
|
|
- "'greenboot-status' in result_greenboot_packages.stdout"
|
|
fail_msg: "Some of greenboot and its related packages are not installed"
|
|
success_msg: "All greenboot and its related packages are installed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: (ansible_facts['distribution'] == 'RedHat' and ansible_facts['distribution_version'] is version('8.6', '<'))
|
|
|
|
- name: greenboot should be installed
|
|
block:
|
|
- name: greenboot should be installed
|
|
shell: rpm -qa | grep greenboot
|
|
register: result_greenboot_packages
|
|
|
|
- assert:
|
|
that:
|
|
- "'greenboot-0' in result_greenboot_packages.stdout"
|
|
fail_msg: "greenboot is not installed"
|
|
success_msg: "greenboot is installed"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: ansible_facts['distribution_version'] != '8.5'
|
|
|
|
# case: check greenboot* services
|
|
- name: a list of greenboot* service should be enabled
|
|
block:
|
|
- name: a list of greenboot* service should be enabled
|
|
command: systemctl is-enabled greenboot-grub2-set-counter greenboot-grub2-set-success greenboot-healthcheck greenboot-rpm-ostree-grub2-check-fallback greenboot-status greenboot-task-runner redboot-auto-reboot redboot-task-runner
|
|
register: result_greenboot_service
|
|
|
|
- assert:
|
|
that:
|
|
- result_greenboot_service.stdout == 'enabled\nenabled\nenabled\nenabled\nenabled\nenabled\nenabled\nenabled'
|
|
fail_msg: "Some of greenboot* services are not enabled"
|
|
success_msg: "All greenboot* services are enabled"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
# case: check greenboot* services log
|
|
- name: all greenboot* service should run without error
|
|
block:
|
|
- name: check boot-complete.target
|
|
# will fail if the target was not reached
|
|
command: systemctl --no-pager status boot-complete.target
|
|
|
|
# TODO: check service/target status instead of string matching messages
|
|
- name: all greenboot* service should run without error
|
|
command: journalctl -b -0 -u greenboot -u greenboot-healthcheck -u greenboot-rpm-ostree-grub2-check-fallback -u greenboot-grub2-set-counter -u greenboot-grub2-set-success -u greenboot-status -u redboot -u redboot-auto-reboot -u redboot.target
|
|
register: result_greenboot_log
|
|
|
|
- assert:
|
|
that:
|
|
- "'greenboot Health Checks Runner' in result_greenboot_log.stdout"
|
|
- "'Mark boot as successful in grubenv' in result_greenboot_log.stdout"
|
|
- "'Boot Status is GREEN - Health Check SUCCESS' in result_greenboot_log.stdout"
|
|
fail_msg: "Some errors happened in service boot"
|
|
success_msg: "All greenboot services booted success"
|
|
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
# case: check grubenv variables
|
|
- name: grubenv variables should contain boot_success=1
|
|
block:
|
|
- name: grubenv variables should contain boot_success=1
|
|
command: grub2-editenv list
|
|
register: result_grubenv
|
|
become: yes
|
|
|
|
- assert:
|
|
that:
|
|
- "'boot_success=1' in result_grubenv.stdout"
|
|
fail_msg: "Not found boot_success=1"
|
|
success_msg: "Found boot_success=1"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
- name: check fdo-client-linuxapp logs
|
|
command: journalctl -u fdo-client-linuxapp
|
|
register: result_fdo_client_linuxapp_journalctl
|
|
when: fdo_credential == "true"
|
|
|
|
- debug:
|
|
var: result_fdo_client_linuxapp_journalctl
|
|
|
|
# case: check rollback function if boot error found
|
|
- name: install sanely failing health check unit to test red boot status behavior
|
|
block:
|
|
- name: install sanely failing health check unit to test red boot status behavior
|
|
command: rpm-ostree install --cache-only https://s3.amazonaws.com/org.osbuild.test-dependencies/greenboot-failing-unit-1.0-1.el8.noarch.rpm --reboot
|
|
become: yes
|
|
ignore_errors: yes
|
|
ignore_unreachable: yes
|
|
|
|
- name: delay 30 seconds before reboot to make system stable
|
|
pause:
|
|
seconds: 30
|
|
delegate_to: 127.0.0.1
|
|
|
|
- name: wait for connection to become reachable/usable
|
|
wait_for_connection:
|
|
delay: 30
|
|
|
|
- name: waits until instance is reachable
|
|
wait_for:
|
|
host: "{{ ansible_all_ipv4_addresses[0] }}"
|
|
port: 22
|
|
search_regex: OpenSSH
|
|
delay: 10
|
|
register: result_rollback
|
|
|
|
- assert:
|
|
that:
|
|
- result_rollback is succeeded
|
|
fail_msg: "Rollback failed"
|
|
success_msg: "Rollback success"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: skip_rollback_test == "false"
|
|
|
|
# case: check ostree commit after rollback
|
|
- name: check ostree commit after rollback
|
|
block:
|
|
- name: check ostree commit after rollback
|
|
shell: rpm-ostree status --json | jq -r '.deployments[0].checksum'
|
|
register: result_commit
|
|
|
|
- name: make a json result
|
|
set_fact:
|
|
deploy_commit: "{{ result_commit.stdout }}"
|
|
|
|
- assert:
|
|
that:
|
|
- deploy_commit == ostree_commit
|
|
fail_msg: "Not rollback to last commit"
|
|
success_msg: "Rollback success"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when:
|
|
- skip_rollback_test == "false"
|
|
- result_rollback is succeeded
|
|
|
|
# case: check greenboot* services log again
|
|
- name: fallback log should be found here
|
|
block:
|
|
- name: check boot-complete.target
|
|
# will fail if the target was not reached
|
|
command: systemctl --no-pager status boot-complete.target
|
|
|
|
# TODO: check service/target status instead of string matching messages
|
|
- name: fallback log should be found here
|
|
command: journalctl -b -0 -u greenboot -u greenboot-healthcheck -u greenboot-rpm-ostree-grub2-check-fallback -u greenboot-grub2-set-counter -u greenboot-grub2-set-success -u greenboot-status -u redboot -u redboot-auto-reboot -u redboot.target
|
|
register: result_greenboot_log
|
|
|
|
- assert:
|
|
that:
|
|
- "'FALLBACK BOOT DETECTED! Default rpm-ostree deployment has been rolled back' in result_greenboot_log.stdout"
|
|
fail_msg: "Fallback log not found"
|
|
success_msg: "Found fallback log"
|
|
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when:
|
|
- skip_rollback_test == "false"
|
|
- result_rollback is succeeded
|
|
|
|
# case: check grubenv variables again
|
|
- name: grubenv variables should contain boot_success=1
|
|
block:
|
|
- name: grubenv variables should contain boot_success=1
|
|
command: grub2-editenv list
|
|
register: result_grubenv
|
|
become: yes
|
|
|
|
- assert:
|
|
that:
|
|
- "'boot_success=1' in result_grubenv.stdout"
|
|
fail_msg: "Not found boot_success=1"
|
|
success_msg: "Found boot_success=1"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when:
|
|
- skip_rollback_test == "false"
|
|
- result_rollback is succeeded
|
|
|
|
# Reboot for persistent logging and disk re-encryption
|
|
- name: reboot system when rollback test is skipped
|
|
block:
|
|
- name: reboot system
|
|
reboot:
|
|
become: yes
|
|
register: result_reboot
|
|
until: result_reboot is success
|
|
retries: 10
|
|
delay: 5
|
|
|
|
- name: delay 10 seconds
|
|
pause:
|
|
seconds: 10
|
|
delegate_to: 127.0.0.1
|
|
|
|
- name: wait for connection to become reachable/usable
|
|
wait_for_connection:
|
|
delay: 30
|
|
|
|
- name: waits until instance is reachable
|
|
wait_for:
|
|
host: "{{ ansible_all_ipv4_addresses[0] }}"
|
|
port: 22
|
|
search_regex: OpenSSH
|
|
delay: 10
|
|
register: result_waitfor
|
|
until: result_waitfor is success
|
|
retries: 6
|
|
delay: 10
|
|
when:
|
|
- skip_rollback_test == "true"
|
|
|
|
- name: check journald has persistent logging
|
|
block:
|
|
- name: list boots
|
|
shell: journalctl --list-boots -q
|
|
register: result_list_boots
|
|
|
|
- assert:
|
|
that:
|
|
- result_list_boots.stdout_lines | length > 1
|
|
fail_msg: "journald hasn't persistent logging"
|
|
success_msg: "journald has persistent logging"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
|
|
- name: check fdo-client-linuxapp logs
|
|
command: journalctl -u fdo-client-linuxapp
|
|
register: result_fdo_client_linuxapp_journalctl
|
|
when: fdo_credential == "true"
|
|
|
|
- debug:
|
|
var: result_fdo_client_linuxapp_journalctl
|
|
|
|
- name: wait for FDO re-encryption
|
|
block:
|
|
- shell: cryptsetup luksDump /dev/vda4
|
|
register: result
|
|
until: not result.stdout_lines is search("cipher_null-ecb")
|
|
retries: 30
|
|
delay: 60
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: fdo_credential == "true"
|
|
|
|
- name: check fdo-client-linuxapp logs
|
|
command: journalctl -u fdo-client-linuxapp
|
|
register: result_fdo_client_linuxapp_journalctl
|
|
when: fdo_credential == "true"
|
|
|
|
- debug:
|
|
var: result_fdo_client_linuxapp_journalctl
|
|
|
|
# case: check fdo device mac in device info field within device credentials
|
|
- name: Check mac address within device credentials
|
|
block:
|
|
- name: Check MAC address of interface taken from fdo customization
|
|
shell: "cat /sys/class/net/{{ mfg_guest_int_name }}/address"
|
|
register: fdo_cust_mac_add
|
|
- name: Check mac within fdo device credentials
|
|
shell: fdo-owner-tool dump-device-credential /etc/device-credentials | grep -E 'Device Info' | awk '{print $3}'
|
|
register: dev_credentials_mac_add
|
|
- assert:
|
|
that:
|
|
- dev_credentials_mac_add.stdout == fdo_cust_mac_add.stdout
|
|
fail_msg: "Wrong device info within device credentials"
|
|
success_msg: "Device onboarded successfully via network interface"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when:
|
|
- fdo_credential == "true"
|
|
- (ansible_facts['distribution_version'] is version('9.4', '>=')) or (ansible_facts['distribution_version'] is version('9', '=='))
|
|
|
|
# case: checking firewall customizations
|
|
- name: Check applied firewall customizations
|
|
block:
|
|
- name: Ensure firewall customizations applied from blueprint in trusted zone
|
|
command: firewall-cmd --info-zone=trusted
|
|
register: result_trusted_zone
|
|
become: yes
|
|
- name: Ensure firewall customizations applied from blueprint in work zone
|
|
command: firewall-cmd --info-zone=work
|
|
register: result_work_zone
|
|
become: yes
|
|
|
|
- assert:
|
|
that:
|
|
- "'192.168.100.51' in result_trusted_zone.stdout"
|
|
- "'192.168.100.52' in result_work_zone.stdout"
|
|
fail_msg: "No firewall customizations found"
|
|
success_msg: "Firewall customizations added from blueprint"
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: firewall_feature == "true"
|
|
|
|
# case: checking files and directories customizations
|
|
- name: Check that custom files and directories are present
|
|
block:
|
|
# Test basic custom files and directories creation
|
|
- name: Check that /etc/custom_dir exists
|
|
stat:
|
|
path: /etc/custom_dir
|
|
|
|
- name: Check that /etc/custom_dir/dir1 exists
|
|
stat:
|
|
path: /etc/custom_dir/dir1
|
|
register: result_custom_dir
|
|
|
|
- name: Check that /etc/custom_dir/dir1 is owned by user ID `1020`
|
|
assert:
|
|
that:
|
|
- result_custom_dir.stat.uid == 1020
|
|
fail_msg: "Directory /etc/custom_dir/dir1 is not owned by user ID '1020'"
|
|
success_msg: "Directory /etc/custom_dir/dir1 is owned by user ID '1020'"
|
|
|
|
- name: Check that /etc/custom_dir/dir1 is owned by group ID `1020`
|
|
assert:
|
|
that:
|
|
- result_custom_dir.stat.gid == 1020
|
|
fail_msg: "Directory /etc/custom_dir/dir1 is not owned by group ID '1020'"
|
|
success_msg: "Directory /etc/custom_dir/dir1 is owned by group ID '1020'"
|
|
|
|
- name: Check that /etc/custom_dir/dir1 has 0770 permissions
|
|
assert:
|
|
that:
|
|
- result_custom_dir.stat.mode == '0770'
|
|
fail_msg: "Directory /etc/custom_dir/dir1 has wrong permissions"
|
|
success_msg: "Directory /etc/custom_dir/dir1 has correct permissions"
|
|
|
|
# Test the use of custom files for systemd units
|
|
- name: Check status of 'custom.service'
|
|
systemd:
|
|
name: custom.service
|
|
register: result_custom_service
|
|
|
|
- name: Check that 'custom.service' is started and enabled
|
|
assert:
|
|
that:
|
|
- result_custom_service.status['LoadState'] == 'loaded'
|
|
- result_custom_service.status['ActiveState'] == 'active'
|
|
- result_custom_service.status['SubState'] == 'exited'
|
|
- result_custom_service.status['UnitFileState'] == 'enabled'
|
|
fail_msg: "Service 'custom.service' is not started or enabled"
|
|
success_msg: "Service 'custom.service' is started and enabled"
|
|
|
|
- name: Check that 'custom.service' was overridden by drop-in
|
|
assert:
|
|
that:
|
|
- "'/etc/systemd/system/custom.service.d/override.conf' in result_custom_service.status['DropInPaths']"
|
|
fail_msg: "Service 'custom.service' was not overridden by drop-in"
|
|
success_msg: "Service 'custom.service' was overridden by drop-in"
|
|
|
|
- name: Check output of 'custom.service' in the journal
|
|
command: journalctl -b -u custom.service
|
|
register: custom_service_journal
|
|
become: yes
|
|
|
|
- name: Check that 'image builder is the best' message is present in journal
|
|
assert:
|
|
that:
|
|
- "'image builder is the best' in custom_service_journal.stdout"
|
|
fail_msg: "Message 'image builder is the best' is not present in journal"
|
|
success_msg: "Message 'image builder is the best' is present in journal"
|
|
|
|
always:
|
|
- set_fact:
|
|
total_counter: "{{ total_counter | int + 1 }}"
|
|
rescue:
|
|
- name: failed count + 1
|
|
set_fact:
|
|
failed_counter: "{{ failed_counter | int + 1 }}"
|
|
when: test_custom_dirs_files == "true"
|
|
|
|
- name: Check if any test failed
|
|
assert:
|
|
that:
|
|
- failed_counter == "0"
|
|
fail_msg: "Run {{ total_counter }} tests, but {{ failed_counter }} of them failed"
|
|
success_msg: "Totally {{ total_counter }} test passed"
|