@@ -1046,36 +1046,78 @@ def put_get_bucket_lifecycle_test(
1046
1046
raise TestExecError ("bucket lifecycle config retrieval failed" )
1047
1047
else :
1048
1048
raise TestExecError ("bucket life cycle retrieved" )
1049
- objs_total = (config .test_ops ["version_count" ]) * (config .objects_count )
1050
- if not upload_start_time :
1051
- upload_start_time = time .time ()
1052
- if not upload_end_time :
1053
- upload_end_time = time .time ()
1054
- time_diff = math .ceil (upload_end_time - upload_start_time )
1055
- time_limit = upload_start_time + (
1056
- config .rgw_lc_debug_interval * config .test_ops .get ("actual_lc_days" , 20 )
1057
- )
1058
- for rule in config .lifecycle_conf :
1059
- if rule .get ("Expiration" , {}).get ("Date" , False ):
1060
- # todo: need to get the interval value from yaml file
1061
- log .info (f"wait for 60 seconds" )
1062
- time .sleep (60 )
1063
- else :
1064
- while time .time () < time_limit :
1065
- bucket_stats_op = utils .exec_shell_cmd (
1066
- "radosgw-admin bucket stats --bucket=%s" % bucket .name
1049
+ if config .test_ops .get ("reuse_account_bucket" , False ) is True :
1050
+ max_retries = 1500
1051
+ sleep_interval = 30
1052
+ bucket_stats_output = utils .exec_shell_cmd (
1053
+ f"radosgw-admin bucket stats --bucket tenant1/{ bucket .name } "
1054
+ )
1055
+ bucket_stats_json = json .loads (bucket_stats_output )
1056
+ objects_before_transition = bucket_stats_json ["usage" ]["rgw.main" ][
1057
+ "num_objects"
1058
+ ]
1059
+ lc_transition_start_time = time .time ()
1060
+ for retry in range (max_retries + 2 ):
1061
+ if retry == 0 :
1062
+ time .sleep (
1063
+ max_retries
1064
+ ) # since value of max_retries is same as rgw_lc_debug_interval
1065
+
1066
+ bucket_stats_output = utils .exec_shell_cmd (
1067
+ f"radosgw-admin bucket stats --bucket tenant1/{ bucket .name } "
1068
+ )
1069
+ log .info (f"bucket stats output for { bucket .name } : { bucket_stats_output } " )
1070
+ bucket_stats_json = json .loads (bucket_stats_output )
1071
+
1072
+ if (
1073
+ bucket_stats_json ["usage" ]["rgw.cloudtiered" ]["num_objects" ]
1074
+ >= objects_before_transition
1075
+ and bucket_stats_json ["usage" ]["rgw.usage" ]["num_objects" ] == 0
1076
+ ):
1077
+ log .info (
1078
+ f" all the objects for bucket successfully cloud transitioned to IBM"
1067
1079
)
1068
- json_doc1 = json .loads (bucket_stats_op )
1069
- obj_pre_lc = json_doc1 ["usage" ]["rgw.main" ]["num_objects" ]
1070
- if obj_pre_lc == objs_total or config .test_lc_transition :
1071
- time .sleep (config .rgw_lc_debug_interval )
1072
- else :
1073
- raise TestExecError ("Objects expired before the expected days" )
1074
- lc_grace_time = config .test_ops .get ("lc_grace_time" , 90 )
1075
- log .info (
1076
- f"sleeping for { time_diff + lc_grace_time } seconds so that all objects gets expired/transitioned"
1077
- )
1078
- time .sleep (time_diff + lc_grace_time )
1080
+ break
1081
+ else :
1082
+ log .info (
1083
+ f"Cloud transition still in progress after { retry } retry, sleep for { sleep_interval } and retry"
1084
+ )
1085
+ time .sleep (sleep_interval )
1086
+ if retry > max_retries :
1087
+ raise AssertionError (
1088
+ f"LC transition to cloud for { objects_before_transition } failed"
1089
+ )
1090
+ else :
1091
+
1092
+ objs_total = (config .test_ops ["version_count" ]) * (config .objects_count )
1093
+ if not upload_start_time :
1094
+ upload_start_time = time .time ()
1095
+ if not upload_end_time :
1096
+ upload_end_time = time .time ()
1097
+ time_diff = math .ceil (upload_end_time - upload_start_time )
1098
+ time_limit = upload_start_time + (
1099
+ config .rgw_lc_debug_interval * config .test_ops .get ("actual_lc_days" , 20 )
1100
+ )
1101
+ for rule in config .lifecycle_conf :
1102
+ if rule .get ("Expiration" , {}).get ("Date" , False ):
1103
+ # todo: need to get the interval value from yaml file
1104
+ log .info ("wait for 60 seconds" )
1105
+ time .sleep (60 )
1106
+ else :
1107
+ while time .time () < time_limit :
1108
+ bucket_stats_op = utils .exec_shell_cmd (
1109
+ "radosgw-admin bucket stats --bucket=%s" % bucket .name
1110
+ )
1111
+ json_doc1 = json .loads (bucket_stats_op )
1112
+ obj_pre_lc = json_doc1 ["usage" ]["rgw.main" ]["num_objects" ]
1113
+ if obj_pre_lc == objs_total or config .test_lc_transition :
1114
+ time .sleep (config .rgw_lc_debug_interval )
1115
+ else :
1116
+ raise TestExecError ("Objects expired before the expected days" )
1117
+ log .info (
1118
+ f"sleeping for { time_diff + 90 } seconds so that all objects gets expired/transitioned"
1119
+ )
1120
+ time .sleep (time_diff + 90 )
1079
1121
1080
1122
if config .test_ops .get ("conflict_exp_days" ):
1081
1123
bucket_stats_op = utils .exec_shell_cmd (
@@ -2029,17 +2071,23 @@ def prepare_for_bucket_lc_transition(config):
2029
2071
f"radosgw-admin zonegroup placement add --rgw-zonegroup { zonegroup } --placement-id default-placement --storage-class CLOUDIBM --tier-type=cloud-s3 --tier-config=endpoint={ endpoint } ,access_key={ access } ,secret={ secret } ,target_path={ target_path } ,multipart_sync_threshold=44432,multipart_min_part_size=44432,retain_head_object=false,region=au-syd"
2030
2072
)
2031
2073
else :
2032
- target_path = "aws-bucket-01"
2074
+ wget_cmd = "curl -o aws_cloud.env http://magna002.ceph.redhat.com/cephci-jenkins/aws_cloud_file"
2075
+ utils .exec_shell_cmd (cmd = f"{ wget_cmd } " )
2076
+ aws_config = configobj .ConfigObj ("aws_cloud.env" )
2077
+ target_path = aws_config ["TARGET" ]
2078
+ access = aws_config ["ACCESS" ]
2079
+ secret = aws_config ["SECRET" ]
2080
+ endpoint = aws_config ["ENDPOINT" ]
2033
2081
utils .exec_shell_cmd (
2034
2082
f"radosgw-admin zonegroup placement add --rgw-zonegroup { zonegroup } --placement-id default-placement --storage-class=CLOUDAWS --tier-type=cloud-s3"
2035
2083
)
2036
2084
if config .test_ops .get ("test_retain_head" , False ):
2037
2085
utils .exec_shell_cmd (
2038
- f"radosgw-admin zonegroup placement add --rgw-zonegroup { zonegroup } --placement-id default-placement --storage-class CLOUDAWS --tier-type=cloud-s3 --tier-config=endpoint=http://s3region.amazonaws.com ,access_key=awsaccesskey ,secret=awssecretkey ,target_path={ target_path } ,multipart_sync_threshold=44432,multipart_min_part_size=44432,retain_head_object=true,region=aws-region "
2086
+ f"radosgw-admin zonegroup placement add --rgw-zonegroup { zonegroup } --placement-id default-placement --storage-class CLOUDAWS --tier-type=cloud-s3 --tier-config=endpoint={ endpoint } ,access_key={ access } ,secret={ secret } ,target_path={ target_path } ,multipart_sync_threshold=44432,multipart_min_part_size=44432,retain_head_object=true,region=us-east-1 "
2039
2087
)
2040
2088
else :
2041
2089
utils .exec_shell_cmd (
2042
- f"radosgw-admin zonegroup placement add --rgw-zonegroup { zonegroup } --placement-id default-placement --storage-class CLOUDAWS --tier-type=cloud-s3 --tier-config=endpoint=http://s3.aws-region.amazonaws.com ,access_key=awsaccesskey ,secret=awssecretkey ,target_path={ target_path } ,multipart_sync_threshold=44432,multipart_min_part_size=44432,retain_head_object=false,region=us-east-1"
2090
+ f"radosgw-admin zonegroup placement add --rgw-zonegroup { zonegroup } --placement-id default-placement --storage-class CLOUDAWS --tier-type=cloud-s3 --tier-config=endpoint={ endpoint } ,access_key={ access } ,secret={ secret } ,target_path={ target_path } ,multipart_sync_threshold=44432,multipart_min_part_size=44432,retain_head_object=false,region=us-east-1"
2043
2091
)
2044
2092
if is_multisite :
2045
2093
utils .exec_shell_cmd ("radosgw-admin period update --commit" )
@@ -3172,3 +3220,45 @@ def bring_up_all_rgws_in_the_site(rgw_service_name, retry=10, delay=10):
3172
3220
break
3173
3221
if retry_count + 1 == retry :
3174
3222
raise AssertionError ("Node is not in expected state!!" )
3223
+
3224
+
3225
def configure_rgw_lc_settings():
    """
    Retrieve RGW services via 'ceph orch ls | grep rgw' and set LC debug configs.

    For every RGW service found, sets ``rgw_lc_debug_interval`` to 600 and
    ``rgw_lc_max_worker`` to 10 via ``ceph config set``, then restarts the
    service so the settings take effect.

    Returns:
        None. Logs an error and returns early when no RGW services are
        found or the listing command fails.
    """
    log.info("Retrieving RGW service names...")

    # Fetch RGW services
    rgw_services_output = utils.exec_shell_cmd("ceph orch ls | grep rgw")

    if not rgw_services_output:
        log.error("No RGW services found or failed to retrieve.")
        return

    # Extract service names: the first whitespace-separated column of each
    # non-empty line of `ceph orch ls` output is the service name.
    rgw_services = []
    for line in rgw_services_output.split("\n"):
        columns = line.strip().split()
        if columns:  # ignore empty lines
            rgw_services.append(columns[0])

    if not rgw_services:
        log.warning("No valid RGW services extracted.")
        return

    log.info(f"Found RGW services: {rgw_services}")

    # Set LC debug interval and worker count for each RGW service
    for service in rgw_services:
        lc_config_cmd1 = f"ceph config set client.{service} rgw_lc_debug_interval 600"
        log.info(f"Setting LC config for {service}: {lc_config_cmd1}")
        utils.exec_shell_cmd(lc_config_cmd1)
        # BUG FIX: the original string was missing the f-prefix, so the
        # literal text "{service}" was passed to `ceph config set` and the
        # rgw_lc_max_worker setting never reached the intended service.
        lc_config_cmd2 = f"ceph config set client.{service} rgw_lc_max_worker 10"
        log.info(f"Setting LC config for {service}: {lc_config_cmd2}")
        utils.exec_shell_cmd(lc_config_cmd2)
        # Restart so the new config values take effect on the running daemon.
        ceph_restart_cmd = f"ceph orch restart {service}"
        utils.exec_shell_cmd(ceph_restart_cmd)

    log.info("RGW LC debug interval settings updated successfully.")
0 commit comments