diff --git a/src/integration-tests/src/lib.rs b/src/integration-tests/src/lib.rs index 16f5089c8..dfbad2c1c 100644 --- a/src/integration-tests/src/lib.rs +++ b/src/integration-tests/src/lib.rs @@ -13,7 +13,7 @@ // limitations under the License. use gax::error::Error; -use rand::{Rng, distr::Alphanumeric}; +use rand::{Rng, distr::Alphanumeric, distr::Distribution}; pub type Result<T> = std::result::Result<T, Error>; pub mod error_details; @@ -63,3 +63,14 @@ pub(crate) fn random_workflow_id() -> String { .collect(); format!("{PREFIX}{workflow_id}") } + +pub(crate) struct RandomChars { + chars: &'static [u8], +} + +impl Distribution<u8> for RandomChars { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 { + let index = rng.random_range(0..self.chars.len()); + self.chars[index] + } +} diff --git a/src/integration-tests/src/storage.rs b/src/integration-tests/src/storage.rs index 3c471c358..a80e88367 100644 --- a/src/integration-tests/src/storage.rs +++ b/src/integration-tests/src/storage.rs @@ -12,11 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::Error; -use crate::Result; +use crate::{Error, RandomChars, Result}; +use gax::exponential_backoff::{ExponentialBackoff, ExponentialBackoffBuilder}; +use gax::options::RequestOptionsBuilder; use gax::paginator::{ItemPaginator, Paginator}; +use rand::Rng; +use std::time::Duration; +use storage::model::Bucket; -pub const BUCKET_ID_LENGTH: usize = 32; +pub const BUCKET_ID_LENGTH: usize = 63; pub async fn buckets(builder: storage::client::ClientBuilder) -> Result<()> { // Enable a basic subscriber. 
Useful to troubleshoot problems and visually @@ -38,6 +42,48 @@ pub async fn buckets(builder: storage::client::ClientBuilder) -> Result<()> { cleanup_stale_buckets(&client, &project_id).await?; + let bucket_id = random_bucket_id(); + let bucket_name = format!("projects/_/buckets/{bucket_id}"); + + println!("\nTesting create_bucket()"); + let create = client + .create_bucket("projects/_", bucket_id) + .set_bucket( + Bucket::new() + .set_project(format!("projects/{project_id}")) + .set_labels([("integration-test", "true")]), + ) + .with_backoff_policy(test_backoff()) + .send() + .await?; + println!("SUCCESS on create_bucket: {create:?}"); + assert_eq!(create.name, bucket_name); + + println!("\nTesting get_bucket()"); + let get = client.get_bucket(&bucket_name).send().await?; + println!("SUCCESS on get_bucket: {get:?}"); + assert_eq!(get.name, bucket_name); + + println!("\nTesting list_buckets()"); + let mut buckets = client + .list_buckets(format!("projects/{project_id}")) + .paginator() + .await + .items(); + let mut bucket_names = Vec::new(); + while let Some(bucket) = buckets.next().await { + bucket_names.push(bucket?.name); + } + println!("SUCCESS on list_buckets"); + assert!( + bucket_names.iter().any(|name| name == &bucket_name), + "missing bucket name {bucket_name} in {bucket_names:?}" + ); + + println!("\nTesting delete_bucket()"); + client.delete_bucket(bucket_name).send().await?; + println!("SUCCESS on delete_bucket"); + Ok(()) } @@ -49,23 +95,23 @@ async fn cleanup_stale_buckets(client: &storage::client::Storage, project_id: &s let stale_deadline = stale_deadline - Duration::from_secs(48 * 60 * 60); let stale_deadline = wkt::Timestamp::clamp(stale_deadline.as_secs() as i64, 0); - let mut items = client + let mut buckets = client .list_buckets(format!("projects/{project_id}")) .paginator() .await .items(); let mut pending = Vec::new(); let mut names = Vec::new(); - while let Some(bucket) = items.next().await { - let item = bucket?; - if let Some("true") = 
item.labels.get("integration-test").map(String::as_str) { - if let Some(true) = item.create_time.map(|v| v < stale_deadline) { + while let Some(bucket) = buckets.next().await { + let bucket = bucket?; + if let Some("true") = bucket.labels.get("integration-test").map(String::as_str) { + if let Some(true) = bucket.create_time.map(|v| v < stale_deadline) { let client = client.clone(); - let name = item.name.clone(); + let name = bucket.name.clone(); pending.push(tokio::spawn( async move { cleanup_bucket(client, name).await }, )); - names.push(item.name); + names.push(bucket.name); } } } @@ -102,3 +148,24 @@ async fn cleanup_bucket(client: storage::client::Storage, name: String) -> Resul let _ = futures::future::join_all(pending).await; client.delete_bucket(&name).send().await } + +const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyz0123456789"; + +pub(crate) fn random_bucket_id() -> String { + let distr = RandomChars { chars: CHARSET }; + const PREFIX: &str = "rust-sdk-testing-"; + let bucket_id: String = rand::rng() + .sample_iter(distr) + .take(BUCKET_ID_LENGTH - PREFIX.len()) + .map(char::from) + .collect(); + format!("{PREFIX}{bucket_id}") +} + +fn test_backoff() -> ExponentialBackoff { + ExponentialBackoffBuilder::new() + .with_initial_delay(Duration::from_secs(2)) + .with_maximum_delay(Duration::from_secs(10)) + .build() + .unwrap() +}