Commit e8c37f4

Merge pull request #17 from timflannagan1/unit-tests
Add various integration tests
2 parents 742f3cf + 154ff8d commit e8c37f4

25 files changed: +591 -5 lines

library/find_unused_disk.py

Lines changed: 21 additions & 2 deletions
@@ -19,6 +19,11 @@
     description: Sets the maximum number of unused disks to return.
     default: 10
     type: int
+
+option-name: min_size
+    description: Specifies the minimum disk size to return an unused disk.
+    default: 0
+    type: str
 '''
 
 EXAMPLES = '''
@@ -27,7 +32,9 @@
 tasks:
 - name: run module
   find_unused_disk:
+    min_size: '10g'
   register: testout
+
 - name: dump test output
   debug:
     msg: '{{ testout }}'
@@ -57,6 +64,7 @@
 
 from ansible.module_utils.basic import AnsibleModule
 from ansible.module_utils import facts
+from ansible.module_utils.size import Size
 
 
 def no_signature(run_command, disk_path):
@@ -83,7 +91,8 @@ def can_open(disk_path):
 def run_module():
     """Create the module"""
     module_args = dict(
-        max_return=dict(type='int', required=False, default=10)
+        max_return=dict(type='int', required=False, default=10),
+        min_size=dict(type='str', required=False, default=0)
     )
 
     result = dict(
@@ -98,17 +107,27 @@ def run_module():
 
     ansible_facts = facts.ansible_facts(module)
     run_command = module.run_command
+
     for disk in ansible_facts['devices'].keys():
         # If partition table exists but contains no partitions -> no partitions.
         no_partitions = not bool(ansible_facts['devices'][disk]['partitions'])
 
+        ansible_disk_size = ansible_facts['devices'][disk]['size'].lower().replace('gb', 'g').replace('mb', 'm')
+        disk_size = Size(ansible_disk_size)
+        min_disk_size = Size(module.params['min_size'])
+
         if no_partitions and no_signature(run_command, '/dev/' + disk) and no_holders(disk) and can_open('/dev/' + disk):
-            result['disks'].append(disk)
+            if min_disk_size.bytes <= disk_size.bytes:
+                result['disks'].append(disk)
+
         if len(result['disks']) >= module.params['max_return']:
             break
 
     if not result['disks']:
         result['disks'] = "Unable to find unused disk"
+    else:
+        result['disks'].sort()
+
     module.exit_json(**result)
 
 
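
For a sense of how the new option is consumed, here is a minimal sketch of a play that invokes the module with both parameters (the values '2g' and 3 are illustrative, not from the commit; min_size accepts the human-readable strings that Size parses, and its default of 0 disables size filtering):

- hosts: all
  tasks:
    - name: Find up to three unused disks of at least 2 GiB
      find_unused_disk:
        min_size: '2g'   # hypothetical value; parsed with Size()
        max_return: 3    # hypothetical value; existing option, default 10
      register: found

    # found.disks is a sorted list of device names, or the string
    # "Unable to find unused disk" when nothing qualifies.
    - debug:
        var: found.disks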

tasks/fs-default.yml

Lines changed: 11 additions & 0 deletions
@@ -9,6 +9,11 @@
         state: present
     - set_fact:
         xfsprogs_installed: true
+  rescue:
+    - debug:
+        msg:
+          - "Failed to install the xfsprogs package. Check if the Scalable File System add-on is enabled."
+          - "See: https://access.redhat.com/discussions/2051373"
   when: "volume.fs_type == 'xfs' and ['xfsprogs'] is not subset(ansible_facts.packages.keys()) \
         and xfsprogs_installed is undefined and not ansible_check_mode \
         and ((pool is defined and pool.state in 'present') or volume.state in 'present')"
@@ -45,6 +50,12 @@
   when: volume._wipe and volume._orig_mount_point
   changed_when: false
 
+- name: Check if we can use the force parameter in wipefs
+  set_fact:
+    volume: "{{ volume|combine({'fs_destroy_options': '-a'}) }}"
+  when: (ansible_distribution == 'CentOS' and ansible_distribution_major_version == '6') or
+        (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6')
+
 - name: Remove file system as needed
   command: wipefs {{ volume.fs_destroy_options }} {{ volume._device }}
   when: volume._wipe or volume._remove and device_status.stat.exists and not ansible_check_mode

tasks/main.yml

Lines changed: 8 additions & 0 deletions
@@ -3,6 +3,14 @@
   package_facts:
     manager: "auto"
 
+- name: define an empty list of pools to be used in testing
+  set_fact:
+    _storage_pools_list: []
+
+- name: define an empty list of volumes to be used in testing
+  set_fact:
+    _storage_volumes_list: []
+
 - name: manage pools
   include_tasks: pool-{{ storage_backend }}.yml
   loop: "{{ storage_pools }}"

tasks/mount-default.yml

Lines changed: 5 additions & 1 deletion
@@ -44,4 +44,8 @@
 
 - name: tell systemd to refresh its view of /etc/fstab
   command: systemctl daemon-reload
-  when: mount_info is defined and mount_info.changed
+  when:
+    - mount_info is defined
+    - mount_info.changed
+    - (ansible_distribution == 'CentOS' and ansible_distribution_major_version != '6') or
+      (ansible_distribution == 'RedHat' and ansible_distribution_major_version != '6')

tasks/pool-default.yml

Lines changed: 4 additions & 0 deletions
@@ -41,6 +41,10 @@
     pool: "{{ pool|combine({'_preexist': pool.name in ansible_facts.lvm.vgs}) }}"
   when: ansible_facts.lvm is defined and pool.type == "lvm"
 
+- name: Add the finished pool to a list of pools
+  set_fact:
+    _storage_pools_list: "{{ _storage_pools_list + [pool] }}"
+
 #
 # XXX This is only going to remove fstab entries etc. for volumes explicitly listed.
 #

tasks/volume-default.yml

Lines changed: 13 additions & 0 deletions
@@ -99,6 +99,19 @@
                        'state': volume.state}) }}"
   when: volume.type == "partition"
 
+- name: Use the ext4 file system when distribution is RHEL 6 (and xfs was not specified)
+  set_fact:
+    volume: "{{ volume|combine({'fs_type': 'ext4'}) }}"
+  when:
+    - raw_volume.fs_type is undefined
+    - ansible_distribution == 'RedHat'
+    - ansible_distribution_major_version == '6'
+
+- name: Append volume state info to storage_volumes_list
+  set_fact:
+    _storage_volumes_list: "{{ _storage_volumes_list + [volume] }}"
+  when: volume.type == 'partition' or volume.type == 'disk'
+
 - name: Manage the Specified Volume
   include_tasks: "{{ layer }}-{{ storage_backend }}.yml"
   loop: "{{ volume_layers if volume._create else volume_layers[::-1] }}"

tests/.fmf/version

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+1

tests/get_unused_disk.yml

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
+---
+- name: Find unused disks in the system
+  find_unused_disk:
+    min_size: "{{ min_size | default(omit) }}"
+  register: unused_disks_return
+
+- set_fact:
+    unused_disks: "{{ unused_disks_return.disks }}"
+  when: "'Unable to find unused disk' not in unused_disks_return.disks"
+
+- block:
+    - name: Exit playbook when there's no unused disks in the system
+      debug:
+        msg: "Unable to find unused disks. Exiting playbook."
+    - meta: end_play
+  when: unused_disks is undefined
+
+- name: Print unused disks
+  debug:
+    var: unused_disks
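
Because min_size falls back to omit, callers can include this helper with or without a size constraint; a minimal sketch of both call styles (volume_size stands in for whatever the calling playbook defines):

- include_tasks: get_unused_disk.yml    # any unused disk qualifies

- include_tasks: get_unused_disk.yml    # only disks at least volume_size large
  vars:
    min_size: "{{ volume_size }}"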

tests/inventory

Lines changed: 0 additions & 2 deletions
This file was deleted.

tests/provision.fmf

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+standard-inventory-qcow2:
+  qemu:
+    drive:
+      - size: 10737418240
+      - size: 10737418240
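
Each provisioned qemu drive is 10737418240 bytes, i.e. 10 × 1024³ = 10 GiB, which leaves headroom for the '5g' volumes the tests below create.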

tests/test-verify-volumes.yml

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+---
+- name: Read the /etc/fstab file for volume existence
+  command: cat /etc/fstab
+  register: fstab_buffer
+  changed_when: false
+
+- name: Verify the volumes listed in storage_pools were correctly managed
+  include_tasks: "{{ current_pool.state is search('absent') | bool | ternary('test-volumes-absent-helper.yml', 'test-volumes-present-helper.yml') }}"
+  loop: "{{ _storage_pools_list }}"
+  loop_control:
+    loop_var: current_pool
+  when: _storage_pools_list is defined and _storage_pools_list | length > 0
+
+- block:
+    - name: Verify the volumes listed in current_volumes were correctly managed
+      assert:
+        that:
+          - "{{ current_volume.mount_point in fstab_buffer.stdout if current_volume.state in 'present' \
+            else current_volume.mount_point not in fstab_buffer.stdout }}"
+          - "{{ ansible_mounts|selectattr('device', 'equalto', current_volume._device)|list|length==1 \
+            if current_volume.state in 'present' else \
+            ansible_mounts|selectattr('device', 'equalto', current_volume._device)|list|length==0 }}"
+        msg: "The storage volume {{ current_volume.name }} was incorrectly managed."
+      loop: "{{ _storage_volumes_list }}"
+      loop_control:
+        loop_var: current_volume
+  when: _storage_volumes_list is defined and _storage_volumes_list | length > 0

tests/test-volumes-absent-helper.yml

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+---
+- name: Make sure all volumes in pool.volumes are absent
+  assert:
+    that:
+      - current_volume.mount_point not in fstab_buffer.stdout
+      - "{{ ansible_mounts|selectattr('device', 'equalto', current_volume.name)|list|length == 0 }}"
+    msg: "The pool volume {{ current_volume.name }} was incorrectly managed."
+  loop: "{{ current_pool.volumes }}"
+  loop_control:
+    loop_var: current_volume

tests/test-volumes-present-helper.yml

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+---
+- name: Ensure all lvm volumes were correctly managed in /etc/fstab and ansible_mounts.
+  assert:
+    that:
+      - "{{ ansible_mounts|selectattr('device', 'equalto', current_volume.name)|list|length==1
+        if current_volume.state is defined and current_volume.state in 'present' else
+        ansible_mounts|selectattr('device', 'equalto', current_volume.name)|list|length==0
+        }}"
+
+      - "{{ (ansible_mounts|selectattr('device', 'equalto', current_volume.name)|map(attribute='mount')
+        == current_volume.mount_point) if current_volume.state is defined and current_volume.state in 'present'
+        else true
+        }}"
+
+      - "{{ current_volume.mount_point not in fstab_buffer.stdout if
+        (current_volume.state is defined and current_volume.state in 'absent')
+        else current_volume.mount_point in fstab_buffer.stdout }}"
+    msg: "The volume {{ current_volume.name }} was incorrectly managed in /etc/fstab."
+  loop: "{{ current_pool.volumes }}"
+  loop_control:
+    loop_var: current_volume

tests/tests_change_disk_fs.yml

Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+---
+- hosts: all
+  become: true
+  tags: ['never', 'expfail']
+  vars:
+    mount_location: '/opt/test'
+    volume_size: '5g'
+    fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}"
+
+  tasks:
+    - include_role:
+        name: storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "{{ volume_size }}"
+
+    - name: Create a disk device with the default file system type
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            mount_point: "{{ mount_location }}"
+            disks: "{{ unused_disks[0] }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Change the disk device file system type to "{{ fs_type_after }}"
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            mount_point: "{{ mount_location }}"
+            fs_type: "{{ fs_type_after }}"
+            disks: "{{ unused_disks[0] }}"
+
+    - include_tasks: verify-role-results.yml

tests/tests_change_disk_mount.yml

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
+---
+- hosts: all
+  become: true
+  tags: ['never', 'expfail']
+  vars:
+    mount_location_before: '/opt/test1'
+    mount_location_after: '/opt/test2'
+    volume_size: '5g'
+
+  tasks:
+    - include_role:
+        name: storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "{{ volume_size }}"
+
+    - name: Create a disk device mounted at "{{ mount_location_before }}"
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            mount_point: "{{ mount_location_before }}"
+            disks: "{{ unused_disks[0] }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Change the disk device mount location to "{{ mount_location_after }}"
+      include_role:
+        name: storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            mount_point: "{{ mount_location_after }}"
+            disks: "{{ unused_disks[0] }}"
+
+    - include_tasks: verify-role-results.yml

tests/tests_change_fs.yml

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+---
+- hosts: all
+  become: true
+  vars:
+    mount_location: '/opt/test1'
+    volume_size: '5g'
+    fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}"
+
+  tasks:
+    - include_role:
+        name: storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "{{ volume_size }}"
+
+    - name: Create a LVM logical volume with default fs_type
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks[0] }}"
+            volumes:
+              - name: test1
+                size: "{{ volume_size }}"
+                mount_point: "{{ mount_location }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Change the file system signature on the logical volume created above
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks[0] }}"
+            volumes:
+              - name: test1
+                size: "{{ volume_size }}"
+                fs_type: "{{ fs_after }}"
+                mount_point: "{{ mount_location }}"
+
+    - include_tasks: verify-role-results.yml
