From e47fc9374864890deb8bc1901bcb568096d33ded Mon Sep 17 00:00:00 2001
From: Adam Leventhal
Date: Wed, 5 Jan 2022 12:02:46 -0800
Subject: [PATCH] generate iterators over dropshot paginated interfaces (#19)

* Adds support for iterating with Streams over interfaces that are tagged
  with the x-dropshot-pagination extension. This requires clients to use
  the futures crate. Adds tests that compile generated clients. Updates
  nexus.json to reflect a more recent omicron API. Changes all generated
  methods to have a lifetime ('a) bound on all references. This isn't
  necessary for most methods, but greatly simplifies generation of the
  associated paginated interface.
---
 Cargo.lock                                 |   85 +-
 progenitor-impl/src/lib.rs                 |  352 ++++-
 progenitor-impl/tests/output/buildomat.out |   94 +-
 progenitor-impl/tests/output/keeper.out    |   25 +-
 progenitor-impl/tests/output/nexus.out     | 1623 ++++++++++++++++----
 progenitor/Cargo.toml                      |    7 +
 progenitor/tests/build_buildomat.rs        |    3 +
 progenitor/tests/build_keeper.rs           |    3 +
 progenitor/tests/build_nexus.rs            |    3 +
 sample_openapi/nexus.json                  |  727 +++++++--
 10 files changed, 2314 insertions(+), 608 deletions(-)
 create mode 100644 progenitor/tests/build_buildomat.rs
 create mode 100644 progenitor/tests/build_keeper.rs
 create mode 100644 progenitor/tests/build_nexus.rs

diff --git a/Cargo.lock b/Cargo.lock
index 7787d09..078afe0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -185,43 +185,92 @@ dependencies = [
 ]
 
 [[package]]
-name = "futures-channel"
-version = "0.3.17"
+name = "futures"
+version = "0.3.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888"
+checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b"
 dependencies = [
  "futures-core",
+ "futures-sink",
 ]
 
 [[package]]
 name = "futures-core"
-version = "0.3.17"
+version = "0.3.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d"
+checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
 
 [[package]]
 name = "futures-sink"
-version = "0.3.17"
+version = "0.3.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11"
+checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508"
 
 [[package]]
 name = "futures-task"
-version = 
"0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg", + "futures-channel", "futures-core", + "futures-io", + "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", + "slab", ] [[package]] @@ -608,12 +657,17 @@ name = "progenitor" version = "0.0.0" dependencies = [ "anyhow", + "chrono", + "futures", "getopts", "openapiv3", + "percent-encoding", "progenitor-impl", "progenitor-macro", + "reqwest", "serde", "serde_json", + "uuid", ] [[package]] @@ -1104,7 +1158,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typify" version = "0.0.6-dev" -source = "git+https://github.com/oxidecomputer/typify#58bfcd02a2cd74bff047e9e8ad6e4f2b4f84f3af" +source = "git+https://github.com/oxidecomputer/typify#df983c2981fc055efeba3fc360e724221703d4bd" dependencies = [ "typify-impl", "typify-macro", @@ -1113,9 +1167,10 @@ dependencies = [ [[package]] name = "typify-impl" version = "0.0.6-dev" -source = "git+https://github.com/oxidecomputer/typify#58bfcd02a2cd74bff047e9e8ad6e4f2b4f84f3af" +source = "git+https://github.com/oxidecomputer/typify#df983c2981fc055efeba3fc360e724221703d4bd" dependencies = [ "convert_case", + "log", "proc-macro2", "quote", "rustfmt-wrapper", @@ -1128,7 +1183,7 @@ dependencies = [ [[package]] name = "typify-macro" version = "0.0.6-dev" -source = "git+https://github.com/oxidecomputer/typify#58bfcd02a2cd74bff047e9e8ad6e4f2b4f84f3af" +source = "git+https://github.com/oxidecomputer/typify#df983c2981fc055efeba3fc360e724221703d4bd" dependencies = [ "proc-macro2", "quote", diff --git a/progenitor-impl/src/lib.rs b/progenitor-impl/src/lib.rs index 21231ef..cdd295b 100644 --- a/progenitor-impl/src/lib.rs +++ b/progenitor-impl/src/lib.rs @@ -1,6 +1,6 @@ // Copyright 2021 Oxide Computer Company -use std::cmp::Ordering; +use std::{cmp::Ordering, collections::HashMap}; use convert_case::{Case, Casing}; use indexmap::IndexMap; @@ -10,10 +10,10 @@ use openapiv3::{ }; use proc_macro2::TokenStream; -use quote::{format_ident, quote}; +use quote::{format_ident, quote, ToTokens}; use template::PathTemplate; use thiserror::Error; -use typify::TypeSpace; +use typify::{TypeId, TypeSpace}; use crate::to_schema::ToSchema; @@ -42,6 +42,7 @@ pub struct Generator { inner_type: Option, pre_hook: Option, post_hook: Option, + uses_futures: bool, } struct OperationMethod { @@ -51,6 +52,11 @@ struct OperationMethod { doc_comment: Option, params: Vec, responses: Vec, + dropshot_paginated: Option, +} + +struct DropshotPagination { + item: TypeId, } #[derive(Debug, PartialEq, Eq)] @@ -66,7 +72,7 @@ struct OperationParameter { } enum OperationParameterType { - TokenStream(TokenStream), + Type(TypeId), RawBody, } #[derive(Debug)] @@ -77,7 +83,7 @@ struct OperationResponse { #[derive(Debug)] enum OperationResponseType { - TokenStream(TokenStream), + Type(TypeId), None, Raw, } @@ -271,12 +277,11 @@ impl Generator { ); let typ = self .type_space - .add_type_with_name(&schema, Some(name))? 
- .parameter_ident(); + .add_type_with_name(&schema, Some(name))?; Ok(OperationParameter { name: sanitize(¶meter_data.name, Case::Snake), - typ: OperationParameterType::TokenStream(typ), + typ: OperationParameterType::Type(typ), kind: OperationParameterKind::Path, }) } @@ -307,13 +312,12 @@ impl Generator { let typ = self .type_space - .add_type_with_name(&schema, Some(name))? - .parameter_ident(); + .add_type_with_name(&schema, Some(name))?; query.push((nam, !parameter_data.required)); Ok(OperationParameter { name: sanitize(¶meter_data.name, Case::Snake), - typ: OperationParameterType::TokenStream(typ), + typ: OperationParameterType::Type(typ), kind: OperationParameterKind::Query( parameter_data.required, ), @@ -344,9 +348,8 @@ impl Generator { ); let typ = self .type_space - .add_type_with_name(&schema, Some(name))? - .parameter_ident(); - OperationParameterType::TokenStream(typ) + .add_type_with_name(&schema, Some(name))?; + OperationParameterType::Type(typ) } else { todo!("media type encoding, no schema: {:#?}", mt); } @@ -450,12 +453,11 @@ impl Generator { ); self.type_space .add_type_with_name(&schema, Some(name))? - .ident() } else { todo!("media type encoding, no schema: {:#?}", mt); }; - OperationResponseType::TokenStream(typ) + OperationResponseType::Type(typ) } else if response.content.first().is_some() { OperationResponseType::Raw } else { @@ -477,7 +479,9 @@ impl Generator { .collect::>>()?; // If the API has declined to specify the characteristics of a - // successful response, we cons up a generic one. + // successful response, we cons up a generic one. Note that this is + // technically permissible within OpenAPI, but advised against in the + // spec. if !success { responses.push(OperationResponse { status_code: StatusCode::Range(2), @@ -485,6 +489,9 @@ impl Generator { }); } + let dropshot_paginated = + self.dropshot_pagination_data(operation, &raw_params, &responses); + Ok(OperationMethod { operation_id: sanitize(operation_id, Case::Snake), method: method.to_string(), @@ -492,36 +499,44 @@ impl Generator { doc_comment: operation.description.clone(), params: raw_params, responses, + dropshot_paginated, }) } - fn process_method(&self, method: &OperationMethod) -> Result { - let operation_id = format_ident!("{}", method.operation_id,); + fn process_method( + &mut self, + method: &OperationMethod, + ) -> Result { + let operation_id = format_ident!("{}", method.operation_id); let mut bounds_items: Vec = Vec::new(); - let params = method + let typed_params = method .params .iter() .map(|param| { let name = format_ident!("{}", param.name); let typ = match ¶m.typ { - OperationParameterType::TokenStream(t) => t.clone(), + OperationParameterType::Type(type_id) => self + .type_space + .get_type(type_id) + .unwrap() + .parameter_ident_with_lifetime("a"), OperationParameterType::RawBody => { - bounds_items.push(quote! { B: Into}); - quote! {B} + bounds_items.push(quote! { B: Into }); + quote! { B } } }; - quote! { - #name: #typ - } + ( + param, + quote! { + #name: #typ + }, + ) }) .collect::>(); - let bounds = if bounds_items.is_empty() { - quote! {} - } else { - quote! { - < #(#bounds_items),* > - } - }; + + let params = typed_params.iter().map(|(_, stream)| stream); + + let bounds = quote! 
{ < 'a, #(#bounds_items),* > }; let query_items = method .params @@ -564,11 +579,11 @@ impl Generator { let body_func = method.params.iter().filter_map(|param| match ¶m.kind { OperationParameterKind::Body => match ¶m.typ { - OperationParameterType::TokenStream(_) => { + OperationParameterType::Type(_) => { Some(quote! { .json(body) }) } OperationParameterType::RawBody => { - Some(quote! { .body(body )}) + Some(quote! { .body(body) }) } }, _ => None, @@ -589,12 +604,12 @@ impl Generator { let (response_type, decode_response) = success_response_items .next() .map(|response| match &response.typ { - OperationResponseType::TokenStream(typ) => { - (typ.clone(), quote! {res.json().await?}) - } + OperationResponseType::Type(type_id) => ( + self.type_space.get_type(type_id).unwrap().ident(), + quote! { res.json().await? }, + ), OperationResponseType::None => { - // TODO this doesn't seem quite right; I think we still want to return the raw response structure here. - (quote! { () }, quote! { () }) + (quote! { reqwest::Response }, quote! { res }) } OperationResponseType::Raw => { (quote! { reqwest::Response }, quote! { res }) @@ -632,7 +647,7 @@ impl Generator { let method_impl = quote! { #[doc = #doc_comment] pub async fn #operation_id #bounds ( - &self, + &'a self, #(#params),* ) -> Result<#response_type> { #url_path @@ -655,20 +670,264 @@ impl Generator { Ok(#decode_response) } }; - Ok(method_impl) + + let stream_impl = method.dropshot_paginated.as_ref().map(|page_data| { + // We're now using futures. + self.uses_futures = true; + + let stream_id = format_ident!("{}_stream", method.operation_id); + + // The parameters are the same as those to the paged method, but + // without "page_token" + let stream_params = + typed_params.iter().filter_map(|(param, stream)| { + if param.name.as_str() == "page_token" { + None + } else { + Some(stream) + } + }); + + // The values passed to get the first page are the inputs to the + // stream method with "None" for the page_token. + let first_params = typed_params.iter().map(|(param, _)| { + if param.name.as_str() == "page_token" { + // The page_token is None when getting the first page. + quote! { None } + } else { + // All other parameters are passed through directly. + format_ident!("{}", param.name).to_token_stream() + } + }); + + // The values passed to get subsequent pages are... + // - the state variable for the page_token + // - None for all other query parameters + // - The method inputs for non-query parameters + let step_params = typed_params.iter().map(|(param, _)| { + if param.name.as_str() == "page_token" { + quote! { state.as_deref() } + } else if let OperationParameterKind::Query(_) = param.kind { + // Query parameters are None; having page_token as Some(_) + // is mutually exclusive with other query parameters. + quote! { None } + } else { + // Non-query parameters are passed in; this is necessary + // e.g. to specify the right path. (We don't really expect + // to see a body parameter here, but we pass it through + // regardless.) + format_ident!("{}", param.name).to_token_stream() + } + }); + + // The item type that we've saved (by picking apart the original + // function's return type) will be the Item type parameter for the + // Stream type we return. 
+ let item = self.type_space.get_type(&page_data.item).unwrap(); + let item_type = item.ident(); + + // TODO document parameters + let doc_comment = format!( + "{}returns a Stream by making successive calls to {}", + method + .doc_comment + .as_ref() + .map(|s| format!("{}\n\n", s)) + .unwrap_or_else(String::new), + item.name(), + method.operation_id, + ); + + + quote! { + #[doc = #doc_comment] + pub fn #stream_id #bounds ( + &'a self, + #(#stream_params),* + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + + // Execute the operation with the basic parameters + // (omitting page_token) to get the first page. + self.#operation_id( + #(#first_params,)* + ) + .map_ok(move |page| { + // The first page is just an iter + let first = futures::stream::iter( + page.items.into_iter().map(Ok) + ); + + // We unfold subsequent pages using page.next_page as + // the seed value. Each iteration returns its items and + // the next page token. + let rest = futures::stream::try_unfold( + page.next_page, + move |state| async move { + if state.is_none() { + // The page_token was None so we've reached + // the end. + Ok(None) + } else { + // Get the next page; here we set all query + // parameters to None (except for the + // page_token), and all other parameters as + // specified at the start of this method. + self.#operation_id( + #(#step_params,)* + ) + .map_ok(|page| { + Some(( + futures::stream::iter( + page + .items + .into_iter() + .map(Ok), + ), + page.next_page, + )) + }) + .await + } + }, + ) + .try_flatten(); + + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + } + }); + + let all = quote! { + #method_impl + #stream_impl + }; + + Ok(all) + } + + // Validates all the necessary conditions for Dropshot pagination. Returns + // the paginated item type data if all conditions are met. + fn dropshot_pagination_data( + &self, + operation: &openapiv3::Operation, + parameters: &[OperationParameter], + responses: &[OperationResponse], + ) -> Option { + if operation + .extensions + .get("x-dropshot-pagination") + .and_then(|v| v.as_bool()) + != Some(true) + { + return None; + } + + // We expect to see at least "page_token" and "limit" parameters. + if parameters + .iter() + .filter(|param| { + matches!( + (param.name.as_str(), ¶m.kind), + ("page_token", OperationParameterKind::Query(_)) + | ("limit", OperationParameterKind::Query(_)) + ) + }) + .count() + != 2 + { + return None; + } + + // All query parameters must be optional since page_token may not be + // specified in conjunction with other query parameters. + if !parameters.iter().all(|param| match ¶m.kind { + OperationParameterKind::Query(required) => !required, + _ => true, + }) { + return None; + } + + // There must be exactly one successful response type. 
+ let mut success_response_items = + responses.iter().filter_map(|response| { + match (&response.status_code, &response.typ) { + ( + StatusCode::Code(200..=299) | StatusCode::Range(2), + OperationResponseType::Type(type_id), + ) => Some(type_id), + _ => None, + } + }); + + let success_response = match ( + success_response_items.next(), + success_response_items.next(), + ) { + (None, _) | (_, Some(_)) => return None, + (Some(success), None) => success, + }; + + let typ = self.type_space.get_type(success_response).ok()?; + let details = match typ.details() { + typify::TypeDetails::Struct(details) => details, + _ => return None, + }; + + let properties = details.properties().collect::>(); + + // There should be exactly two properties: items and next_page + if properties.len() != 2 { + return None; + } + + // We need a next_page property that's an Option. + if let typify::TypeDetails::Option(ref opt_id) = self + .type_space + .get_type(properties.get("next_page")?) + .ok()? + .details() + { + if !matches!( + self.type_space.get_type(opt_id).ok()?.details(), + typify::TypeDetails::Builtin("String") + ) { + return None; + } + } else { + return None; + } + + match self + .type_space + .get_type(properties.get("items")?) + .ok()? + .details() + { + typify::TypeDetails::Array(item) => { + Some(DropshotPagination { item }) + } + _ => None, + } } pub fn generate_text(&mut self, spec: &OpenAPI) -> Result { let output = self.generate_tokens(spec)?; - // Format the file with rustfmt and some whitespace niceties. + // Format the file with rustfmt. let content = rustfmt_wrapper::rustfmt(output).unwrap(); + // Add newlines after end-braces at <= two levels of indentation. Ok(if cfg!(not(windows)) { - let regex = regex::Regex::new(r#"(})(\n\s*[^} ])"#).unwrap(); + let regex = regex::Regex::new(r#"(})(\n\s{0,8}[^} ])"#).unwrap(); regex.replace_all(&content, "$1\n$2").to_string() } else { - let regex = regex::Regex::new(r#"(})(\r\n\s*[^} ])"#).unwrap(); + let regex = regex::Regex::new(r#"(})(\r\n\s{0,8}[^} ])"#).unwrap(); regex.replace_all(&content, "$1\r\n$2").to_string() }) } @@ -688,6 +947,9 @@ impl Generator { if self.type_space.uses_chrono() { deps.push("chrono = { version = \"0.4\", features = [\"serde\"] }") } + if self.uses_futures { + deps.push("futures = \"0.3\"") + } if self.type_space.uses_serde_json() { deps.push("serde_json = \"1.0\"") } diff --git a/progenitor-impl/tests/output/buildomat.out b/progenitor-impl/tests/output/buildomat.out index d1e799e..591fcc1 100644 --- a/progenitor-impl/tests/output/buildomat.out +++ b/progenitor-impl/tests/output/buildomat.out @@ -182,7 +182,7 @@ impl Client { } #[doc = "control_hold: POST /v1/control/hold"] - pub async fn control_hold(&self) -> Result<()> { + pub async fn control_hold<'a>(&'a self) -> Result<()> { let url = format!("{}/v1/control/hold", self.baseurl,); let request = self.client.post(url).build()?; let result = self.client.execute(request).await; @@ -191,16 +191,16 @@ impl Client { } #[doc = "control_resume: POST /v1/control/resume"] - pub async fn control_resume(&self) -> Result<()> { + pub async fn control_resume<'a>(&'a self) -> Result { let url = format!("{}/v1/control/resume", self.baseurl,); let request = self.client.post(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "task_get: GET /v1/task/{task}"] - pub async fn task_get(&self, task: &str) -> Result { + pub async fn task_get<'a>(&'a self, task: &'a str) -> Result { let url = format!( "{}/v1/task/{}", 
self.baseurl, @@ -213,7 +213,7 @@ impl Client { } #[doc = "tasks_get: GET /v1/tasks"] - pub async fn tasks_get(&self) -> Result> { + pub async fn tasks_get<'a>(&'a self) -> Result> { let url = format!("{}/v1/tasks", self.baseurl,); let request = self.client.get(url).build()?; let result = self.client.execute(request).await; @@ -222,7 +222,10 @@ impl Client { } #[doc = "task_submit: POST /v1/tasks"] - pub async fn task_submit(&self, body: &types::TaskSubmit) -> Result { + pub async fn task_submit<'a>( + &'a self, + body: &'a types::TaskSubmit, + ) -> Result { let url = format!("{}/v1/tasks", self.baseurl,); let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; @@ -231,9 +234,9 @@ impl Client { } #[doc = "task_events_get: GET /v1/tasks/{task}/events"] - pub async fn task_events_get( - &self, - task: &str, + pub async fn task_events_get<'a>( + &'a self, + task: &'a str, minseq: Option, ) -> Result> { let url = format!( @@ -253,7 +256,7 @@ impl Client { } #[doc = "task_outputs_get: GET /v1/tasks/{task}/outputs"] - pub async fn task_outputs_get(&self, task: &str) -> Result> { + pub async fn task_outputs_get<'a>(&'a self, task: &'a str) -> Result> { let url = format!( "{}/v1/tasks/{}/outputs", self.baseurl, @@ -266,10 +269,10 @@ impl Client { } #[doc = "task_output_download: GET /v1/tasks/{task}/outputs/{output}"] - pub async fn task_output_download( - &self, - task: &str, - output: &str, + pub async fn task_output_download<'a>( + &'a self, + task: &'a str, + output: &'a str, ) -> Result { let url = format!( "{}/v1/tasks/{}/outputs/{}", @@ -284,7 +287,10 @@ impl Client { } #[doc = "user_create: POST /v1/users"] - pub async fn user_create(&self, body: &types::UserCreate) -> Result { + pub async fn user_create<'a>( + &'a self, + body: &'a types::UserCreate, + ) -> Result { let url = format!("{}/v1/users", self.baseurl,); let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; @@ -293,7 +299,7 @@ impl Client { } #[doc = "whoami: GET /v1/whoami"] - pub async fn whoami(&self) -> Result { + pub async fn whoami<'a>(&'a self) -> Result { let url = format!("{}/v1/whoami", self.baseurl,); let request = self.client.get(url).build()?; let result = self.client.execute(request).await; @@ -302,9 +308,9 @@ impl Client { } #[doc = "worker_bootstrap: POST /v1/worker/bootstrap"] - pub async fn worker_bootstrap( - &self, - body: &types::WorkerBootstrap, + pub async fn worker_bootstrap<'a>( + &'a self, + body: &'a types::WorkerBootstrap, ) -> Result { let url = format!("{}/v1/worker/bootstrap", self.baseurl,); let request = self.client.post(url).json(body).build()?; @@ -314,7 +320,7 @@ impl Client { } #[doc = "worker_ping: GET /v1/worker/ping"] - pub async fn worker_ping(&self) -> Result { + pub async fn worker_ping<'a>(&'a self) -> Result { let url = format!("{}/v1/worker/ping", self.baseurl,); let request = self.client.get(url).build()?; let result = self.client.execute(request).await; @@ -323,11 +329,11 @@ impl Client { } #[doc = "worker_task_append: POST /v1/worker/task/{task}/append"] - pub async fn worker_task_append( - &self, - task: &str, - body: &types::WorkerAppendTask, - ) -> Result<()> { + pub async fn worker_task_append<'a>( + &'a self, + task: &'a str, + body: &'a types::WorkerAppendTask, + ) -> Result { let url = format!( "{}/v1/worker/task/{}/append", self.baseurl, @@ -336,13 +342,13 @@ impl Client { let request = self.client.post(url).json(body).build()?; let result = 
self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "worker_task_upload_chunk: POST /v1/worker/task/{task}/chunk"] - pub async fn worker_task_upload_chunk>( - &self, - task: &str, + pub async fn worker_task_upload_chunk<'a, B: Into>( + &'a self, + task: &'a str, body: B, ) -> Result { let url = format!( @@ -357,11 +363,11 @@ impl Client { } #[doc = "worker_task_complete: POST /v1/worker/task/{task}/complete"] - pub async fn worker_task_complete( - &self, - task: &str, - body: &types::WorkerCompleteTask, - ) -> Result<()> { + pub async fn worker_task_complete<'a>( + &'a self, + task: &'a str, + body: &'a types::WorkerCompleteTask, + ) -> Result { let url = format!( "{}/v1/worker/task/{}/complete", self.baseurl, @@ -370,15 +376,15 @@ impl Client { let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "worker_task_add_output: POST /v1/worker/task/{task}/output"] - pub async fn worker_task_add_output( - &self, - task: &str, - body: &types::WorkerAddOutput, - ) -> Result<()> { + pub async fn worker_task_add_output<'a>( + &'a self, + task: &'a str, + body: &'a types::WorkerAddOutput, + ) -> Result { let url = format!( "{}/v1/worker/task/{}/output", self.baseurl, @@ -387,11 +393,11 @@ impl Client { let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "workers_list: GET /v1/workers"] - pub async fn workers_list(&self) -> Result { + pub async fn workers_list<'a>(&'a self) -> Result { let url = format!("{}/v1/workers", self.baseurl,); let request = self.client.get(url).build()?; let result = self.client.execute(request).await; @@ -400,11 +406,11 @@ impl Client { } #[doc = "workers_recycle: POST /v1/workers/recycle"] - pub async fn workers_recycle(&self) -> Result<()> { + pub async fn workers_recycle<'a>(&'a self) -> Result { let url = format!("{}/v1/workers/recycle", self.baseurl,); let request = self.client.post(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } } diff --git a/progenitor-impl/tests/output/keeper.out b/progenitor-impl/tests/output/keeper.out index cdfaa91..fc9d179 100644 --- a/progenitor-impl/tests/output/keeper.out +++ b/progenitor-impl/tests/output/keeper.out @@ -123,16 +123,16 @@ impl Client { } #[doc = "enrol: POST /enrol"] - pub async fn enrol(&self, body: &types::EnrolBody) -> Result<()> { + pub async fn enrol<'a>(&'a self, body: &'a types::EnrolBody) -> Result { let url = format!("{}/enrol", self.baseurl,); let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "global_jobs: GET /global/jobs"] - pub async fn global_jobs(&self) -> Result { + pub async fn global_jobs<'a>(&'a self) -> Result { let url = format!("{}/global/jobs", self.baseurl,); let request = self.client.get(url).build()?; let result = self.client.execute(request).await; @@ -141,7 +141,7 @@ impl Client { } #[doc = "ping: GET /ping"] - pub async fn ping(&self) -> Result { + pub async fn ping<'a>(&'a self) -> Result { let url = format!("{}/ping", self.baseurl,); let request = self.client.get(url).build()?; let result = self.client.execute(request).await; @@ -150,9 +150,9 @@ impl Client { } #[doc = "report_finish: POST /report/finish"] - 
pub async fn report_finish( - &self, - body: &types::ReportFinishBody, + pub async fn report_finish<'a>( + &'a self, + body: &'a types::ReportFinishBody, ) -> Result { let url = format!("{}/report/finish", self.baseurl,); let request = self.client.post(url).json(body).build()?; @@ -162,9 +162,9 @@ impl Client { } #[doc = "report_output: POST /report/output"] - pub async fn report_output( - &self, - body: &types::ReportOutputBody, + pub async fn report_output<'a>( + &'a self, + body: &'a types::ReportOutputBody, ) -> Result { let url = format!("{}/report/output", self.baseurl,); let request = self.client.post(url).json(body).build()?; @@ -174,7 +174,10 @@ impl Client { } #[doc = "report_start: POST /report/start"] - pub async fn report_start(&self, body: &types::ReportStartBody) -> Result { + pub async fn report_start<'a>( + &'a self, + body: &'a types::ReportStartBody, + ) -> Result { let url = format!("{}/report/start", self.baseurl,); let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; diff --git a/progenitor-impl/tests/output/nexus.out b/progenitor-impl/tests/output/nexus.out index cefdf09..bd4ca74 100644 --- a/progenitor-impl/tests/output/nexus.out +++ b/progenitor-impl/tests/output/nexus.out @@ -30,6 +30,36 @@ pub mod types { } } + #[doc = "The type of an individual datum of a metric."] + #[derive(Serialize, Deserialize, Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] + pub enum DatumType { + Bool, + I64, + F64, + String, + Bytes, + CumulativeI64, + CumulativeF64, + HistogramI64, + HistogramF64, + } + + impl ToString for DatumType { + fn to_string(&self) -> String { + match self { + DatumType::Bool => "Bool".to_string(), + DatumType::I64 => "I64".to_string(), + DatumType::F64 => "F64".to_string(), + DatumType::String => "String".to_string(), + DatumType::Bytes => "Bytes".to_string(), + DatumType::CumulativeI64 => "CumulativeI64".to_string(), + DatumType::CumulativeF64 => "CumulativeF64".to_string(), + DatumType::HistogramI64 => "HistogramI64".to_string(), + DatumType::HistogramF64 => "HistogramF64".to_string(), + } + } + } + #[doc = "Client view of an [`Disk`]"] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Disk { @@ -58,19 +88,6 @@ pub mod types { pub time_modified: chrono::DateTime, } - #[doc = "Describes a Disk's attachment to an Instance"] - #[derive(Serialize, Deserialize, Debug, Clone)] - pub struct DiskAttachment { - #[serde(rename = "diskId")] - pub disk_id: uuid::Uuid, - #[serde(rename = "diskName")] - pub disk_name: Name, - #[serde(rename = "diskState")] - pub disk_state: DiskState, - #[serde(rename = "instanceId")] - pub instance_id: uuid::Uuid, - } - #[doc = "Create-time parameters for a [`Disk`]"] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct DiskCreate { @@ -86,6 +103,12 @@ pub mod types { pub snapshot_id: Option, } + #[doc = "Parameters for the [`Disk`] to be attached or detached to an instance"] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct DiskIdentifier { + pub disk: Name, + } + #[doc = "A single page of results"] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct DiskResultsPage { @@ -115,6 +138,52 @@ pub mod types { Faulted, } + #[doc = "The name and type information for a field of a timeseries schema."] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct FieldSchema { + pub name: String, + pub source: FieldSource, + pub ty: FieldType, + } + + #[doc = "The source from which a field is derived, the target or metric."] + #[derive(Serialize, 
Deserialize, Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] + pub enum FieldSource { + Target, + Metric, + } + + impl ToString for FieldSource { + fn to_string(&self) -> String { + match self { + FieldSource::Target => "Target".to_string(), + FieldSource::Metric => "Metric".to_string(), + } + } + } + + #[doc = "The `FieldType` identifies the data type of a target or metric field."] + #[derive(Serialize, Deserialize, Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] + pub enum FieldType { + String, + I64, + IpAddr, + Uuid, + Bool, + } + + impl ToString for FieldType { + fn to_string(&self) -> String { + match self { + FieldType::String => "String".to_string(), + FieldType::I64 => "I64".to_string(), + FieldType::IpAddr => "IpAddr".to_string(), + FieldType::Uuid => "Uuid".to_string(), + FieldType::Bool => "Bool".to_string(), + } + } + } + #[doc = "Supported set of sort modes for scanning by id only.\n\nCurrently, we only support scanning in ascending order."] #[derive(Serialize, Deserialize, Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] pub enum IdSortMode { @@ -277,6 +346,16 @@ pub mod types { pub username: String, } + #[doc = "A Media Access Control address, in EUI-48 format"] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct MacAddr(pub String); + impl std::ops::Deref for MacAddr { + type Target = String; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + #[doc = "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'."] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Name(pub String); @@ -323,6 +402,31 @@ pub mod types { } } + #[doc = "A `NetworkInterface` represents a virtual network interface device."] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct NetworkInterface { + pub identity: IdentityMetadata, + #[doc = "The Instance to which the interface belongs."] + pub instance_id: uuid::Uuid, + #[doc = "The IP address assigned to this interface."] + pub ip: String, + pub mac: MacAddr, + #[doc = "The subnet to which the interface belongs."] + pub subnet_id: uuid::Uuid, + #[doc = "The VPC to which the interface belongs."] + pub vpc_id: uuid::Uuid, + } + + #[doc = "A single page of results"] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct NetworkInterfaceResultsPage { + #[doc = "list of items on this page of results"] + pub items: Vec, + #[doc = "token used to fetch the next page of results (if any)"] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub next_page: Option, + } + #[doc = "Client view of an [`Organization`]"] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Organization { @@ -425,6 +529,33 @@ pub mod types { pub next_page: Option, } + #[doc = "Client view of a [`Role`]"] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct Role { + pub description: String, + pub name: RoleName, + } + + #[doc = "Role names consist of two string components separated by dot (\".\")."] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct RoleName(pub String); + impl std::ops::Deref for RoleName { + type Target = String; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + #[doc = "A single page of results"] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct RoleResultsPage { + #[doc = "list of items on this page of results"] + pub items: Vec, + #[doc = "token used to fetch the next page of results (if any)"] + #[serde(default, skip_serializing_if = 
"Option::is_none")] + pub next_page: Option, + } + #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(tag = "type", content = "value")] pub enum RouteDestination { @@ -585,6 +716,35 @@ pub mod types { pub next_page: Option, } + #[doc = "Names are constructed by concatenating the target and metric names with ':'. Target and metric names must be lowercase alphanumeric characters with '_' separating words."] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct TimeseriesName(pub String); + impl std::ops::Deref for TimeseriesName { + type Target = String; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + #[doc = "The schema for a timeseries.\n\nThis includes the name of the timeseries, as well as the datum type of its metric and the schema for each field."] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct TimeseriesSchema { + pub created: chrono::DateTime, + pub datum_type: DatumType, + pub field_schema: Vec, + pub timeseries_name: TimeseriesName, + } + + #[doc = "A single page of results"] + #[derive(Serialize, Deserialize, Debug, Clone)] + pub struct TimeseriesSchemaResultsPage { + #[doc = "list of items on this page of results"] + pub items: Vec, + #[doc = "token used to fetch the next page of results (if any)"] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub next_page: Option, + } + #[doc = "Client view of a [`User`]"] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct User { @@ -976,10 +1136,10 @@ impl Client { } #[doc = "List racks in the system.\n\nhardware_racks_get: GET /hardware/racks"] - pub async fn hardware_racks_get( - &self, + pub async fn hardware_racks_get<'a>( + &'a self, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!("{}/hardware/racks", self.baseurl,); @@ -1002,8 +1162,44 @@ impl Client { Ok(res.json().await?) } + #[doc = "List racks in the system.\n\nreturns a Stream by making successive calls to hardware_racks_get"] + pub fn hardware_racks_get_stream<'a>( + &'a self, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.hardware_racks_get(limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.hardware_racks_get(None, state.as_deref(), None) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Fetch information about a particular rack.\n\nhardware_racks_get_rack: GET /hardware/racks/{rack_id}"] - pub async fn hardware_racks_get_rack(&self, rack_id: &uuid::Uuid) -> Result { + pub async fn hardware_racks_get_rack<'a>( + &'a self, + rack_id: &'a uuid::Uuid, + ) -> Result { let url = format!( "{}/hardware/racks/{}", self.baseurl, @@ -1016,10 +1212,10 @@ impl Client { } #[doc = "List sleds in the system.\n\nhardware_sleds_get: GET /hardware/sleds"] - pub async fn hardware_sleds_get( - &self, + pub async fn hardware_sleds_get<'a>( + &'a self, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!("{}/hardware/sleds", self.baseurl,); @@ -1042,8 +1238,44 @@ impl Client { Ok(res.json().await?) 
} + #[doc = "List sleds in the system.\n\nreturns a Stream by making successive calls to hardware_sleds_get"] + pub fn hardware_sleds_get_stream<'a>( + &'a self, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.hardware_sleds_get(limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.hardware_sleds_get(None, state.as_deref(), None) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Fetch information about a sled in the system.\n\nhardware_sleds_get_sled: GET /hardware/sleds/{sled_id}"] - pub async fn hardware_sleds_get_sled(&self, sled_id: &uuid::Uuid) -> Result { + pub async fn hardware_sleds_get_sled<'a>( + &'a self, + sled_id: &'a uuid::Uuid, + ) -> Result { let url = format!( "{}/hardware/sleds/{}", self.baseurl, @@ -1056,7 +1288,10 @@ impl Client { } #[doc = "spoof_login: POST /login"] - pub async fn spoof_login(&self, body: &types::LoginParams) -> Result { + pub async fn spoof_login<'a>( + &'a self, + body: &'a types::LoginParams, + ) -> Result { let url = format!("{}/login", self.baseurl,); let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; @@ -1065,7 +1300,7 @@ impl Client { } #[doc = "logout: POST /logout"] - pub async fn logout(&self) -> Result { + pub async fn logout<'a>(&'a self) -> Result { let url = format!("{}/logout", self.baseurl,); let request = self.client.post(url).build()?; let result = self.client.execute(request).await; @@ -1074,10 +1309,10 @@ impl Client { } #[doc = "List all organizations.\n\norganizations_get: GET /organizations"] - pub async fn organizations_get( - &self, + pub async fn organizations_get<'a>( + &'a self, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!("{}/organizations", self.baseurl,); @@ -1100,10 +1335,43 @@ impl Client { Ok(res.json().await?) 
} + #[doc = "List all organizations.\n\nreturns a Stream by making successive calls to organizations_get"] + pub fn organizations_get_stream<'a>( + &'a self, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.organizations_get(limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.organizations_get(None, state.as_deref(), None) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a new organization.\n\norganizations_post: POST /organizations"] - pub async fn organizations_post( - &self, - body: &types::OrganizationCreate, + pub async fn organizations_post<'a>( + &'a self, + body: &'a types::OrganizationCreate, ) -> Result { let url = format!("{}/organizations", self.baseurl,); let request = self.client.post(url).json(body).build()?; @@ -1113,9 +1381,9 @@ impl Client { } #[doc = "Fetch a specific organization\n\norganizations_get_organization: GET /organizations/{organization_name}"] - pub async fn organizations_get_organization( - &self, - organization_name: &types::Name, + pub async fn organizations_get_organization<'a>( + &'a self, + organization_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}", @@ -1129,10 +1397,10 @@ impl Client { } #[doc = "Update a specific organization.\n * TODO-correctness: Is it valid for PUT to accept application/json that's a subset of what the resource actually represents? If not, is that a problem? (HTTP may require that this be idempotent.) If so, can we get around that having this be a slightly different content-type (e.g., \"application/json-patch\")? 
We should see what other APIs do.\n\norganizations_put_organization: PUT /organizations/{organization_name}"] - pub async fn organizations_put_organization( - &self, - organization_name: &types::Name, - body: &types::OrganizationUpdate, + pub async fn organizations_put_organization<'a>( + &'a self, + organization_name: &'a types::Name, + body: &'a types::OrganizationUpdate, ) -> Result { let url = format!( "{}/organizations/{}", @@ -1146,10 +1414,10 @@ impl Client { } #[doc = "Delete a specific organization.\n\norganizations_delete_organization: DELETE /organizations/{organization_name}"] - pub async fn organizations_delete_organization( - &self, - organization_name: &types::Name, - ) -> Result<()> { + pub async fn organizations_delete_organization<'a>( + &'a self, + organization_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}", self.baseurl, @@ -1158,15 +1426,15 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List all projects.\n\norganization_projects_get: GET /organizations/{organization_name}/projects"] - pub async fn organization_projects_get( - &self, - organization_name: &types::Name, + pub async fn organization_projects_get<'a>( + &'a self, + organization_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1193,11 +1461,50 @@ impl Client { Ok(res.json().await?) } + #[doc = "List all projects.\n\nreturns a Stream by making successive calls to organization_projects_get"] + pub fn organization_projects_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.organization_projects_get(organization_name, limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.organization_projects_get( + organization_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a new project.\n\norganization_projects_post: POST /organizations/{organization_name}/projects"] - pub async fn organization_projects_post( - &self, - organization_name: &types::Name, - body: &types::ProjectCreate, + pub async fn organization_projects_post<'a>( + &'a self, + organization_name: &'a types::Name, + body: &'a types::ProjectCreate, ) -> Result { let url = format!( "{}/organizations/{}/projects", @@ -1211,10 +1518,10 @@ impl Client { } #[doc = "Fetch a specific project\n\norganization_projects_get_project: GET /organizations/{organization_name}/projects/{project_name}"] - pub async fn organization_projects_get_project( - &self, - organization_name: &types::Name, - project_name: &types::Name, + pub async fn organization_projects_get_project<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}", @@ -1229,11 +1536,11 @@ impl Client { } #[doc = "Update a specific project.\n * 
TODO-correctness: Is it valid for PUT to accept application/json that's a subset of what the resource actually represents? If not, is that a problem? (HTTP may require that this be idempotent.) If so, can we get around that having this be a slightly different content-type (e.g., \"application/json-patch\")? We should see what other APIs do.\n\norganization_projects_put_project: PUT /organizations/{organization_name}/projects/{project_name}"] - pub async fn organization_projects_put_project( - &self, - organization_name: &types::Name, - project_name: &types::Name, - body: &types::ProjectUpdate, + pub async fn organization_projects_put_project<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + body: &'a types::ProjectUpdate, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}", @@ -1248,11 +1555,11 @@ impl Client { } #[doc = "Delete a specific project.\n\norganization_projects_delete_project: DELETE /organizations/{organization_name}/projects/{project_name}"] - pub async fn organization_projects_delete_project( - &self, - organization_name: &types::Name, - project_name: &types::Name, - ) -> Result<()> { + pub async fn organization_projects_delete_project<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}", self.baseurl, @@ -1262,16 +1569,16 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List disks in a project.\n\nproject_disks_get: GET /organizations/{organization_name}/projects/{project_name}/disks"] - pub async fn project_disks_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, + pub async fn project_disks_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1299,12 +1606,53 @@ impl Client { Ok(res.json().await?) } + #[doc = "List disks in a project.\n\nreturns a Stream by making successive calls to project_disks_get"] + pub fn project_disks_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.project_disks_get(organization_name, project_name, limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.project_disks_get( + organization_name, + project_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a disk in a project.\n * TODO-correctness See note about instance create. 
This should be async.\n\nproject_disks_post: POST /organizations/{organization_name}/projects/{project_name}/disks"] - pub async fn project_disks_post( - &self, - organization_name: &types::Name, - project_name: &types::Name, - body: &types::DiskCreate, + pub async fn project_disks_post<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + body: &'a types::DiskCreate, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/disks", @@ -1319,11 +1667,11 @@ impl Client { } #[doc = "Fetch a single disk in a project.\n\nproject_disks_get_disk: GET /organizations/{organization_name}/projects/{project_name}/disks/{disk_name}"] - pub async fn project_disks_get_disk( - &self, - organization_name: &types::Name, - project_name: &types::Name, - disk_name: &types::Name, + pub async fn project_disks_get_disk<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + disk_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/disks/{}", @@ -1339,12 +1687,12 @@ impl Client { } #[doc = "Delete a disk from a project.\n\nproject_disks_delete_disk: DELETE /organizations/{organization_name}/projects/{project_name}/disks/{disk_name}"] - pub async fn project_disks_delete_disk( - &self, - organization_name: &types::Name, - project_name: &types::Name, - disk_name: &types::Name, - ) -> Result<()> { + pub async fn project_disks_delete_disk<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + disk_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/disks/{}", self.baseurl, @@ -1355,16 +1703,16 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List instances in a project.\n\nproject_instances_get: GET /organizations/{organization_name}/projects/{project_name}/instances"] - pub async fn project_instances_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, + pub async fn project_instances_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1392,12 +1740,53 @@ impl Client { Ok(res.json().await?) } + #[doc = "List instances in a project.\n\nreturns a Stream by making successive calls to project_instances_get"] + pub fn project_instances_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.project_instances_get(organization_name, project_name, limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.project_instances_get( + organization_name, + project_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create an instance in a project.\n * TODO-correctness This is supposed to be async. Is that right? 
We can create the instance immediately -- it's just not booted yet. Maybe the boot operation is what's a separate operation_id. What about the response code (201 Created vs 202 Accepted)? Is that orthogonal? Things can return a useful response, including an operation id, with either response code. Maybe a \"reboot\" operation would return a 202 Accepted because there's no actual resource created?\n\nproject_instances_post: POST /organizations/{organization_name}/projects/{project_name}/instances"] - pub async fn project_instances_post( - &self, - organization_name: &types::Name, - project_name: &types::Name, - body: &types::InstanceCreate, + pub async fn project_instances_post<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + body: &'a types::InstanceCreate, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances", @@ -1412,11 +1801,11 @@ impl Client { } #[doc = "Get an instance in a project.\n\nproject_instances_get_instance: GET /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}"] - pub async fn project_instances_get_instance( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, + pub async fn project_instances_get_instance<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances/{}", @@ -1432,12 +1821,12 @@ impl Client { } #[doc = "Delete an instance from a project.\n\nproject_instances_delete_instance: DELETE /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}"] - pub async fn project_instances_delete_instance( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, - ) -> Result<()> { + pub async fn project_instances_delete_instance<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances/{}", self.baseurl, @@ -1448,16 +1837,19 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List disks attached to this instance.\n\ninstance_disks_get: GET /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks"] - pub async fn instance_disks_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, - ) -> Result> { + pub async fn instance_disks_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, + limit: Option, + page_token: Option<&'a str>, + sort_by: Option, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances/{}/disks", self.baseurl, @@ -1465,84 +1857,123 @@ impl Client { progenitor_support::encode_path(&project_name.to_string()), progenitor_support::encode_path(&instance_name.to_string()), ); - let request = self.client.get(url).build()?; + let mut query = Vec::new(); + if let Some(v) = &limit { + query.push(("limit", v.to_string())); + } + + if let Some(v) = &page_token { + query.push(("page_token", v.to_string())); + } + + if let Some(v) = &sort_by { + query.push(("sort_by", v.to_string())); + } + + let request = self.client.get(url).query(&query).build()?; let result = 
self.client.execute(request).await; let res = result?.error_for_status()?; Ok(res.json().await?) } - #[doc = "Fetch a description of the attachment of this disk to this instance.\n\ninstance_disks_get_disk: GET /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/{disk_name}"] - pub async fn instance_disks_get_disk( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, - disk_name: &types::Name, - ) -> Result { + #[doc = "List disks attached to this instance.\n\nreturns a Stream by making successive calls to instance_disks_get"] + pub fn instance_disks_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.instance_disks_get( + organization_name, + project_name, + instance_name, + limit, + None, + sort_by, + ) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.instance_disks_get( + organization_name, + project_name, + instance_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + + #[doc = "instance_disks_attach: POST /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/attach"] + pub async fn instance_disks_attach<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, + body: &'a types::DiskIdentifier, + ) -> Result { let url = format!( - "{}/organizations/{}/projects/{}/instances/{}/disks/{}", + "{}/organizations/{}/projects/{}/instances/{}/disks/attach", self.baseurl, progenitor_support::encode_path(&organization_name.to_string()), progenitor_support::encode_path(&project_name.to_string()), progenitor_support::encode_path(&instance_name.to_string()), - progenitor_support::encode_path(&disk_name.to_string()), ); - let request = self.client.get(url).build()?; + let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; Ok(res.json().await?) 
} - #[doc = "Attach a disk to this instance.\n\ninstance_disks_put_disk: PUT /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/{disk_name}"] - pub async fn instance_disks_put_disk( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, - disk_name: &types::Name, - ) -> Result { + #[doc = "instance_disks_detach: POST /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/detach"] + pub async fn instance_disks_detach<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, + body: &'a types::DiskIdentifier, + ) -> Result { let url = format!( - "{}/organizations/{}/projects/{}/instances/{}/disks/{}", + "{}/organizations/{}/projects/{}/instances/{}/disks/detach", self.baseurl, progenitor_support::encode_path(&organization_name.to_string()), progenitor_support::encode_path(&project_name.to_string()), progenitor_support::encode_path(&instance_name.to_string()), - progenitor_support::encode_path(&disk_name.to_string()), ); - let request = self.client.put(url).build()?; + let request = self.client.post(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; Ok(res.json().await?) } - #[doc = "Detach a disk from this instance.\n\ninstance_disks_delete_disk: DELETE /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/{disk_name}"] - pub async fn instance_disks_delete_disk( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, - disk_name: &types::Name, - ) -> Result<()> { - let url = format!( - "{}/organizations/{}/projects/{}/instances/{}/disks/{}", - self.baseurl, - progenitor_support::encode_path(&organization_name.to_string()), - progenitor_support::encode_path(&project_name.to_string()), - progenitor_support::encode_path(&instance_name.to_string()), - progenitor_support::encode_path(&disk_name.to_string()), - ); - let request = self.client.delete(url).build()?; - let result = self.client.execute(request).await; - let res = result?.error_for_status()?; - Ok(()) - } - #[doc = "Reboot an instance.\n\nproject_instances_instance_reboot: POST /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/reboot"] - pub async fn project_instances_instance_reboot( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, + pub async fn project_instances_instance_reboot<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances/{}/reboot", @@ -1558,11 +1989,11 @@ impl Client { } #[doc = "Boot an instance.\n\nproject_instances_instance_start: POST /organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/start"] - pub async fn project_instances_instance_start( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, + pub async fn project_instances_instance_start<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances/{}/start", @@ -1578,11 +2009,11 @@ impl Client { } #[doc = "Halt an instance.\n\nproject_instances_instance_stop: POST 
/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/stop"] - pub async fn project_instances_instance_stop( - &self, - organization_name: &types::Name, - project_name: &types::Name, - instance_name: &types::Name, + pub async fn project_instances_instance_stop<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + instance_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/instances/{}/stop", @@ -1598,12 +2029,12 @@ impl Client { } #[doc = "List VPCs in a project.\n\nproject_vpcs_get: GET /organizations/{organization_name}/projects/{project_name}/vpcs"] - pub async fn project_vpcs_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, + pub async fn project_vpcs_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1631,12 +2062,53 @@ impl Client { Ok(res.json().await?) } + #[doc = "List VPCs in a project.\n\nreturns a Stream by making successive calls to project_vpcs_get"] + pub fn project_vpcs_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.project_vpcs_get(organization_name, project_name, limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.project_vpcs_get( + organization_name, + project_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a VPC in a project.\n\nproject_vpcs_post: POST /organizations/{organization_name}/projects/{project_name}/vpcs"] - pub async fn project_vpcs_post( - &self, - organization_name: &types::Name, - project_name: &types::Name, - body: &types::VpcCreate, + pub async fn project_vpcs_post<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + body: &'a types::VpcCreate, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs", @@ -1651,11 +2123,11 @@ impl Client { } #[doc = "Get a VPC in a project.\n\nproject_vpcs_get_vpc: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}"] - pub async fn project_vpcs_get_vpc( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, + pub async fn project_vpcs_get_vpc<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}", @@ -1671,13 +2143,13 @@ impl Client { } #[doc = "Update a VPC.\n\nproject_vpcs_put_vpc: PUT /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}"] - pub async fn project_vpcs_put_vpc( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - body: &types::VpcUpdate, - ) -> Result<()> { + pub async fn project_vpcs_put_vpc<'a>( + &'a self, + organization_name: &'a 
types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + body: &'a types::VpcUpdate, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}", self.baseurl, @@ -1688,16 +2160,16 @@ impl Client { let request = self.client.put(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "Delete a vpc from a project.\n\nproject_vpcs_delete_vpc: DELETE /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}"] - pub async fn project_vpcs_delete_vpc( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - ) -> Result<()> { + pub async fn project_vpcs_delete_vpc<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}", self.baseurl, @@ -1708,17 +2180,17 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List firewall rules for a VPC.\n\nvpc_firewall_rules_get: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/firewall/rules"] - pub async fn vpc_firewall_rules_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, + pub async fn vpc_firewall_rules_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1747,13 +2219,63 @@ impl Client { Ok(res.json().await?) 
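// A note on the change just above (not generated): `project_vpcs_put_vpc` and
// `project_vpcs_delete_vpc` previously discarded the response and returned
// `Result<()>`; with `Ok(res)` they now hand back the response that came out of
// `error_for_status()`. A caller that only cares about success can still ignore
// it, or inspect it, e.g. (sketch, placeholder arguments, inside an async fn
// returning the generated `Result` type):
//
//     let resp = client.project_vpcs_delete_vpc(org, proj, vpc).await?;
//     println!("delete returned {}", resp.status());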
} + #[doc = "List firewall rules for a VPC.\n\nreturns a Stream by making successive calls to vpc_firewall_rules_get"] + pub fn vpc_firewall_rules_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.vpc_firewall_rules_get( + organization_name, + project_name, + vpc_name, + limit, + None, + sort_by, + ) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.vpc_firewall_rules_get( + organization_name, + project_name, + vpc_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Replace the firewall rules for a VPC\n\nvpc_firewall_rules_put: PUT /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/firewall/rules"] - pub async fn vpc_firewall_rules_put( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - body: &types::VpcFirewallRuleUpdateParams, + pub async fn vpc_firewall_rules_put<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + body: &'a types::VpcFirewallRuleUpdateParams, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/firewall/rules", @@ -1769,13 +2291,13 @@ impl Client { } #[doc = "List VPC Custom and System Routers\n\nvpc_routers_get: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers"] - pub async fn vpc_routers_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, + pub async fn vpc_routers_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1804,13 +2326,63 @@ impl Client { Ok(res.json().await?) 
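// A sketch (not part of the generated output) of driving pagination by hand with
// the plain `vpc_routers_get` above instead of its `_stream` wrapper: each page
// carries `items` plus an optional `next_page` token, which is fed back in as
// `page_token` until it comes back `None`. This is the same loop the generated
// wrappers express with `futures::stream::try_unfold`. Arguments are placeholders.
//
//     async fn visit_all_routers(
//         client: &Client,
//         org: &types::Name,
//         proj: &types::Name,
//         vpc: &types::Name,
//     ) -> Result<()> {
//         let mut page_token: Option<String> = None;
//         loop {
//             let page = client
//                 .vpc_routers_get(org, proj, vpc, None, page_token.as_deref(), None)
//                 .await?;
//             for _router in page.items {
//                 // each item is one VPC router from this page
//             }
//             match page.next_page {
//                 Some(token) => page_token = Some(token),
//                 None => break,
//             }
//         }
//         Ok(())
//     }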
} + #[doc = "List VPC Custom and System Routers\n\nreturns a Stream by making successive calls to vpc_routers_get"] + pub fn vpc_routers_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.vpc_routers_get( + organization_name, + project_name, + vpc_name, + limit, + None, + sort_by, + ) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.vpc_routers_get( + organization_name, + project_name, + vpc_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a VPC Router\n\nvpc_routers_post: POST /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers"] - pub async fn vpc_routers_post( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - body: &types::VpcRouterCreate, + pub async fn vpc_routers_post<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + body: &'a types::VpcRouterCreate, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers", @@ -1826,12 +2398,12 @@ impl Client { } #[doc = "Get a VPC Router\n\nvpc_routers_get_router: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}"] - pub async fn vpc_routers_get_router( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, + pub async fn vpc_routers_get_router<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}", @@ -1848,14 +2420,14 @@ impl Client { } #[doc = "Update a VPC Router\n\nvpc_routers_put_router: PUT /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}"] - pub async fn vpc_routers_put_router( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, - body: &types::VpcRouterUpdate, - ) -> Result<()> { + pub async fn vpc_routers_put_router<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + body: &'a types::VpcRouterUpdate, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}", self.baseurl, @@ -1867,17 +2439,17 @@ impl Client { let request = self.client.put(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "Delete a router from its VPC\n\nvpc_routers_delete_router: DELETE /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}"] - pub async fn vpc_routers_delete_router( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: 
&types::Name, - router_name: &types::Name, - ) -> Result<()> { + pub async fn vpc_routers_delete_router<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}", self.baseurl, @@ -1889,18 +2461,18 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List a Router's routes\n\nrouters_routes_get: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes"] - pub async fn routers_routes_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, + pub async fn routers_routes_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -1930,14 +2502,67 @@ impl Client { Ok(res.json().await?) } + #[doc = "List a Router's routes\n\nreturns a Stream by making successive calls to routers_routes_get"] + pub fn routers_routes_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.routers_routes_get( + organization_name, + project_name, + vpc_name, + router_name, + limit, + None, + sort_by, + ) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.routers_routes_get( + organization_name, + project_name, + vpc_name, + router_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a VPC Router\n\nrouters_routes_post: POST /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes"] - pub async fn routers_routes_post( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, - body: &types::RouterRouteCreateParams, + pub async fn routers_routes_post<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + body: &'a types::RouterRouteCreateParams, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}/routes", @@ -1954,13 +2579,13 @@ impl Client { } #[doc = "Get a VPC Router route\n\nrouters_routes_get_route: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes/{route_name}"] - pub async fn routers_routes_get_route( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, - route_name: &types::Name, + pub async fn routers_routes_get_route<'a>( + 
&'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + route_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}/routes/{}", @@ -1978,15 +2603,15 @@ impl Client { } #[doc = "Update a Router route\n\nrouters_routes_put_route: PUT /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes/{route_name}"] - pub async fn routers_routes_put_route( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, - route_name: &types::Name, - body: &types::RouterRouteUpdateParams, - ) -> Result<()> { + pub async fn routers_routes_put_route<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + route_name: &'a types::Name, + body: &'a types::RouterRouteUpdateParams, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}/routes/{}", self.baseurl, @@ -1999,18 +2624,18 @@ impl Client { let request = self.client.put(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "Delete a route from its router\n\nrouters_routes_delete_route: DELETE /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/routers/{router_name}/routes/{route_name}"] - pub async fn routers_routes_delete_route( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - router_name: &types::Name, - route_name: &types::Name, - ) -> Result<()> { + pub async fn routers_routes_delete_route<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + router_name: &'a types::Name, + route_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/routers/{}/routes/{}", self.baseurl, @@ -2023,17 +2648,17 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "List subnets in a VPC.\n\nvpc_subnets_get: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets"] - pub async fn vpc_subnets_get( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, + pub async fn vpc_subnets_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!( @@ -2062,13 +2687,63 @@ impl Client { Ok(res.json().await?) 
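// A note on the `_stream` wrappers above and below (not generated): `limit` and
// `sort_by` are only sent on the first request; the follow-up calls inside
// `try_unfold` pass `None` for both and rely on `page_token` alone, which fits
// dropshot-style pagination where the token is expected to carry the scan state.
// So in a call like
//
//     client.vpc_subnets_get_stream(org, proj, vpc, limit, sort_by)
//
// the `limit` bounds only the first page fetched; later pages use the server default.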
} + #[doc = "List subnets in a VPC.\n\nreturns a Stream by making successive calls to vpc_subnets_get"] + pub fn vpc_subnets_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.vpc_subnets_get( + organization_name, + project_name, + vpc_name, + limit, + None, + sort_by, + ) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.vpc_subnets_get( + organization_name, + project_name, + vpc_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Create a subnet in a VPC.\n\nvpc_subnets_post: POST /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets"] - pub async fn vpc_subnets_post( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - body: &types::VpcSubnetCreate, + pub async fn vpc_subnets_post<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + body: &'a types::VpcSubnetCreate, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/subnets", @@ -2084,12 +2759,12 @@ impl Client { } #[doc = "Get subnet in a VPC.\n\nvpc_subnets_get_subnet: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}"] - pub async fn vpc_subnets_get_subnet( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - subnet_name: &types::Name, + pub async fn vpc_subnets_get_subnet<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + subnet_name: &'a types::Name, ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/subnets/{}", @@ -2106,14 +2781,14 @@ impl Client { } #[doc = "Update a VPC Subnet.\n\nvpc_subnets_put_subnet: PUT /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}"] - pub async fn vpc_subnets_put_subnet( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: &types::Name, - subnet_name: &types::Name, - body: &types::VpcSubnetUpdate, - ) -> Result<()> { + pub async fn vpc_subnets_put_subnet<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + subnet_name: &'a types::Name, + body: &'a types::VpcSubnetUpdate, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/subnets/{}", self.baseurl, @@ -2125,17 +2800,17 @@ impl Client { let request = self.client.put(url).json(body).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) } #[doc = "Delete a subnet from a VPC.\n\nvpc_subnets_delete_subnet: DELETE /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}"] - pub async fn vpc_subnets_delete_subnet( - &self, - organization_name: &types::Name, - project_name: &types::Name, - vpc_name: 
&types::Name, - subnet_name: &types::Name, - ) -> Result<()> { + pub async fn vpc_subnets_delete_subnet<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + subnet_name: &'a types::Name, + ) -> Result { let url = format!( "{}/organizations/{}/projects/{}/vpcs/{}/subnets/{}", self.baseurl, @@ -2147,14 +2822,172 @@ impl Client { let request = self.client.delete(url).build()?; let result = self.client.execute(request).await; let res = result?.error_for_status()?; - Ok(()) + Ok(res) + } + + #[doc = "List IP addresses on a VPC subnet.\n\nsubnets_ips_get: GET /organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}/ips"] + pub async fn subnets_ips_get<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + subnet_name: &'a types::Name, + limit: Option, + page_token: Option<&'a str>, + sort_by: Option, + ) -> Result { + let url = format!( + "{}/organizations/{}/projects/{}/vpcs/{}/subnets/{}/ips", + self.baseurl, + progenitor_support::encode_path(&organization_name.to_string()), + progenitor_support::encode_path(&project_name.to_string()), + progenitor_support::encode_path(&vpc_name.to_string()), + progenitor_support::encode_path(&subnet_name.to_string()), + ); + let mut query = Vec::new(); + if let Some(v) = &limit { + query.push(("limit", v.to_string())); + } + + if let Some(v) = &page_token { + query.push(("page_token", v.to_string())); + } + + if let Some(v) = &sort_by { + query.push(("sort_by", v.to_string())); + } + + let request = self.client.get(url).query(&query).build()?; + let result = self.client.execute(request).await; + let res = result?.error_for_status()?; + Ok(res.json().await?) + } + + #[doc = "List IP addresses on a VPC subnet.\n\nreturns a Stream by making successive calls to subnets_ips_get"] + pub fn subnets_ips_get_stream<'a>( + &'a self, + organization_name: &'a types::Name, + project_name: &'a types::Name, + vpc_name: &'a types::Name, + subnet_name: &'a types::Name, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.subnets_ips_get( + organization_name, + project_name, + vpc_name, + subnet_name, + limit, + None, + sort_by, + ) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.subnets_ips_get( + organization_name, + project_name, + vpc_name, + subnet_name, + None, + state.as_deref(), + None, + ) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + + #[doc = "List the built-in roles\n\nroles_get: GET /roles"] + pub async fn roles_get<'a>( + &'a self, + limit: Option, + page_token: Option<&'a str>, + ) -> Result { + let url = format!("{}/roles", self.baseurl,); + let mut query = Vec::new(); + if let Some(v) = &limit { + query.push(("limit", v.to_string())); + } + + if let Some(v) = &page_token { + query.push(("page_token", v.to_string())); + } + + let request = self.client.get(url).query(&query).build()?; + let result = self.client.execute(request).await; + let res = result?.error_for_status()?; + Ok(res.json().await?) 
+ } + + #[doc = "List the built-in roles\n\nreturns a Stream by making successive calls to roles_get"] + pub fn roles_get_stream<'a>( + &'a self, + limit: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.roles_get(limit, None) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.roles_get(None, state.as_deref()) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + + #[doc = "Fetch a specific built-in role\n\nroles_get_role: GET /roles/{role_name}"] + pub async fn roles_get_role<'a>(&'a self, role_name: &'a str) -> Result { + let url = format!( + "{}/roles/{}", + self.baseurl, + progenitor_support::encode_path(&role_name.to_string()), + ); + let request = self.client.get(url).build()?; + let result = self.client.execute(request).await; + let res = result?.error_for_status()?; + Ok(res.json().await?) } #[doc = "List all sagas (for debugging)\n\nsagas_get: GET /sagas"] - pub async fn sagas_get( - &self, + pub async fn sagas_get<'a>( + &'a self, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!("{}/sagas", self.baseurl,); @@ -2177,8 +3010,41 @@ impl Client { Ok(res.json().await?) } + #[doc = "List all sagas (for debugging)\n\nreturns a Stream by making successive calls to sagas_get"] + pub fn sagas_get_stream<'a>( + &'a self, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.sagas_get(limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.sagas_get(None, state.as_deref(), None) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Fetch information about a single saga (for debugging)\n\nsagas_get_saga: GET /sagas/{saga_id}"] - pub async fn sagas_get_saga(&self, saga_id: &uuid::Uuid) -> Result { + pub async fn sagas_get_saga<'a>(&'a self, saga_id: &'a uuid::Uuid) -> Result { let url = format!( "{}/sagas/{}", self.baseurl, @@ -2190,11 +3056,65 @@ impl Client { Ok(res.json().await?) 
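// A sketch (not part of the generated output): when the whole collection is wanted
// up front, `futures::TryStreamExt::try_collect` drains every page of a `_stream`
// wrapper. `roles_get_stream` above takes only `limit` because the underlying
// operation exposes no sort parameter. Assumes a constructed `Client`, and that
// the generated per-item type is `types::Role`.
//
//     use futures::TryStreamExt;
//
//     async fn all_roles(client: &Client) -> Result<Vec<types::Role>> {
//         client.roles_get_stream(None).try_collect().await
//     }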
} - #[doc = "List the built-in system users\n\nusers_get: GET /users"] - pub async fn users_get( - &self, + #[doc = "List all timeseries schema\n\ntimeseries_schema_get: GET /timeseries/schema"] + pub async fn timeseries_schema_get<'a>( + &'a self, limit: Option, - page_token: Option<&str>, + page_token: Option<&'a str>, + ) -> Result { + let url = format!("{}/timeseries/schema", self.baseurl,); + let mut query = Vec::new(); + if let Some(v) = &limit { + query.push(("limit", v.to_string())); + } + + if let Some(v) = &page_token { + query.push(("page_token", v.to_string())); + } + + let request = self.client.get(url).query(&query).build()?; + let result = self.client.execute(request).await; + let res = result?.error_for_status()?; + Ok(res.json().await?) + } + + #[doc = "List all timeseries schema\n\nreturns a Stream by making successive calls to timeseries_schema_get"] + pub fn timeseries_schema_get_stream<'a>( + &'a self, + limit: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.timeseries_schema_get(limit, None) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.timeseries_schema_get(None, state.as_deref()) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + + #[doc = "List the built-in system users\n\nusers_get: GET /users"] + pub async fn users_get<'a>( + &'a self, + limit: Option, + page_token: Option<&'a str>, sort_by: Option, ) -> Result { let url = format!("{}/users", self.baseurl,); @@ -2217,8 +3137,41 @@ impl Client { Ok(res.json().await?) 
} + #[doc = "List the built-in system users\n\nreturns a Stream by making successive calls to users_get"] + pub fn users_get_stream<'a>( + &'a self, + limit: Option, + sort_by: Option, + ) -> impl futures::Stream> + Unpin + '_ { + use futures::StreamExt; + use futures::TryFutureExt; + use futures::TryStreamExt; + self.users_get(limit, None, sort_by) + .map_ok(move |page| { + let first = futures::stream::iter(page.items.into_iter().map(Ok)); + let rest = futures::stream::try_unfold(page.next_page, move |state| async move { + if state.is_none() { + Ok(None) + } else { + self.users_get(None, state.as_deref(), None) + .map_ok(|page| { + Some(( + futures::stream::iter(page.items.into_iter().map(Ok)), + page.next_page, + )) + }) + .await + } + }) + .try_flatten(); + first.chain(rest) + }) + .try_flatten_stream() + .boxed() + } + #[doc = "Fetch a specific built-in system user\n\nusers_get_user: GET /users/{user_name}"] - pub async fn users_get_user(&self, user_name: &types::Name) -> Result { + pub async fn users_get_user<'a>(&'a self, user_name: &'a types::Name) -> Result { let url = format!( "{}/users/{}", self.baseurl, diff --git a/progenitor/Cargo.toml b/progenitor/Cargo.toml index 0e680b9..a7490fc 100644 --- a/progenitor/Cargo.toml +++ b/progenitor/Cargo.toml @@ -14,3 +14,10 @@ getopts = "0.2" openapiv3 = "1.0.0-beta.5" serde = { version = "1.0", features = [ "derive" ] } serde_json = "1.0" + +[dev-dependencies] +chrono = { version = "0.4", features = ["serde"] } +futures = "0.3" +percent-encoding = "2.1" +reqwest = { version = "0.11", features = ["json", "stream"] } +uuid = { version = "0.8", features = ["serde", "v4"] } diff --git a/progenitor/tests/build_buildomat.rs b/progenitor/tests/build_buildomat.rs new file mode 100644 index 0000000..d9b2d5e --- /dev/null +++ b/progenitor/tests/build_buildomat.rs @@ -0,0 +1,3 @@ +// Copyright 2021 Oxide Computer Company + +progenitor::generate_api!("../sample_openapi/buildomat.json"); diff --git a/progenitor/tests/build_keeper.rs b/progenitor/tests/build_keeper.rs new file mode 100644 index 0000000..5bcfdb5 --- /dev/null +++ b/progenitor/tests/build_keeper.rs @@ -0,0 +1,3 @@ +// Copyright 2021 Oxide Computer Company + +progenitor::generate_api!("../sample_openapi/keeper.json"); diff --git a/progenitor/tests/build_nexus.rs b/progenitor/tests/build_nexus.rs new file mode 100644 index 0000000..7f703b1 --- /dev/null +++ b/progenitor/tests/build_nexus.rs @@ -0,0 +1,3 @@ +// Copyright 2021 Oxide Computer Company + +progenitor::generate_api!("../sample_openapi/nexus.json"); diff --git a/sample_openapi/nexus.json b/sample_openapi/nexus.json index d56bd3a..fe6af15 100644 --- a/sample_openapi/nexus.json +++ b/sample_openapi/nexus.json @@ -187,13 +187,21 @@ }, "required": true }, - "responses": {} + "responses": { + "default": { + "description": "" + } + } } }, "/logout": { "post": { "operationId": "logout", - "responses": {} + "responses": { + "default": { + "description": "" + } + } } }, "/organizations": { @@ -974,6 +982,36 @@ "description": "List disks attached to this instance.", "operationId": "instance_disks_get", "parameters": [ + { + "in": "query", + "name": "limit", + "schema": { + "nullable": true, + "description": "Maximum number of items returned by a single call", + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": "page_token", + "schema": { + "nullable": true, + "description": "Token returned by previous call to retreive the subsequent page", + "type": "string" + }, + "style": 
"form" + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameSortMode" + }, + "style": "form" + }, { "in": "path", "name": "instance_name", @@ -1008,11 +1046,64 @@ "content": { "application/json": { "schema": { - "title": "Array_of_DiskAttachment", - "type": "array", - "items": { - "$ref": "#/components/schemas/DiskAttachment" - } + "$ref": "#/components/schemas/DiskResultsPage" + } + } + } + } + }, + "x-dropshot-pagination": true + } + }, + "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/attach": { + "post": { + "operationId": "instance_disks_attach", + "parameters": [ + { + "in": "path", + "name": "instance_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "organization_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "project_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskIdentifier" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Disk" } } } @@ -1020,20 +1111,10 @@ } } }, - "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/{disk_name}": { - "get": { - "description": "Fetch a description of the attachment of this disk to this instance.", - "operationId": "instance_disks_get_disk", + "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/disks/detach": { + "post": { + "operationId": "instance_disks_detach", "parameters": [ - { - "in": "path", - "name": "disk_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, { "in": "path", "name": "instance_name", @@ -1062,119 +1143,28 @@ "style": "simple" } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskIdentifier" + } + } + }, + "required": true + }, "responses": { - "200": { - "description": "successful operation", + "202": { + "description": "successfully enqueued operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/DiskAttachment" + "$ref": "#/components/schemas/Disk" } } } } } - }, - "put": { - "description": "Attach a disk to this instance.", - "operationId": "instance_disks_put_disk", - "parameters": [ - { - "in": "path", - "name": "disk_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, - { - "in": "path", - "name": "instance_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, - { - "in": "path", - "name": "organization_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, - { - "in": "path", - "name": "project_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - } - ], - "responses": { - "201": { - "description": "successful creation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DiskAttachment" - } - } - } - } - } - }, - "delete": { - "description": "Detach a disk from this 
instance.", - "operationId": "instance_disks_delete_disk", - "parameters": [ - { - "in": "path", - "name": "disk_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, - { - "in": "path", - "name": "instance_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, - { - "in": "path", - "name": "organization_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - }, - { - "in": "path", - "name": "project_name", - "required": true, - "schema": { - "$ref": "#/components/schemas/Name" - }, - "style": "simple" - } - ], - "responses": { - "204": { - "description": "successful deletion" - } - } } }, "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/reboot": { @@ -1522,8 +1512,8 @@ "required": true }, "responses": { - "200": { - "description": "successful operation" + "204": { + "description": "resource updated" } } }, @@ -1939,8 +1929,8 @@ "required": true }, "responses": { - "200": { - "description": "successful operation" + "204": { + "description": "resource updated" } } }, @@ -2268,8 +2258,8 @@ "required": true }, "responses": { - "200": { - "description": "successful operation" + "204": { + "description": "resource updated" } } }, @@ -2570,8 +2560,8 @@ "required": true }, "responses": { - "200": { - "description": "successful operation" + "204": { + "description": "resource updated" } } }, @@ -2623,6 +2613,166 @@ } } }, + "/organizations/{organization_name}/projects/{project_name}/vpcs/{vpc_name}/subnets/{subnet_name}/ips": { + "get": { + "description": "List IP addresses on a VPC subnet.", + "operationId": "subnets_ips_get", + "parameters": [ + { + "in": "query", + "name": "limit", + "schema": { + "nullable": true, + "description": "Maximum number of items returned by a single call", + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": "page_token", + "schema": { + "nullable": true, + "description": "Token returned by previous call to retreive the subsequent page", + "type": "string" + }, + "style": "form" + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameSortMode" + }, + "style": "form" + }, + { + "in": "path", + "name": "organization_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "project_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "subnet_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "vpc_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NetworkInterfaceResultsPage" + } + } + } + } + }, + "x-dropshot-pagination": true + } + }, + "/roles": { + "get": { + "description": "List the built-in roles", + "operationId": "roles_get", + "parameters": [ + { + "in": "query", + "name": "limit", + "schema": { + "nullable": true, + "description": "Maximum number of items returned by a single call", + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": 
"page_token", + "schema": { + "nullable": true, + "description": "Token returned by previous call to retreive the subsequent page", + "type": "string" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RoleResultsPage" + } + } + } + } + }, + "x-dropshot-pagination": true + } + }, + "/roles/{role_name}": { + "get": { + "description": "Fetch a specific built-in role", + "operationId": "roles_get_role", + "parameters": [ + { + "in": "path", + "name": "role_name", + "required": true, + "schema": { + "description": "The built-in role's unique name.", + "type": "string" + }, + "style": "simple" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Role" + } + } + } + } + } + } + }, "/sagas": { "get": { "description": "List all sagas (for debugging)", @@ -2704,6 +2854,49 @@ } } }, + "/timeseries/schema": { + "get": { + "description": "List all timeseries schema", + "operationId": "timeseries_schema_get", + "parameters": [ + { + "in": "query", + "name": "limit", + "schema": { + "nullable": true, + "description": "Maximum number of items returned by a single call", + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": "page_token", + "schema": { + "nullable": true, + "description": "Token returned by previous call to retreive the subsequent page", + "type": "string" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TimeseriesSchemaResultsPage" + } + } + } + } + }, + "x-dropshot-pagination": true + } + }, "/users": { "get": { "description": "List the built-in system users", @@ -2793,6 +2986,21 @@ "format": "uint64", "minimum": 0 }, + "DatumType": { + "description": "The type of an individual datum of a metric.", + "type": "string", + "enum": [ + "Bool", + "I64", + "F64", + "String", + "Bytes", + "CumulativeI64", + "CumulativeF64", + "HistogramI64", + "HistogramF64" + ] + }, "Disk": { "description": "Client view of an [`Disk`]", "type": "object", @@ -2855,32 +3063,6 @@ "timeModified" ] }, - "DiskAttachment": { - "description": "Describes a Disk's attachment to an Instance", - "type": "object", - "properties": { - "diskId": { - "type": "string", - "format": "uuid" - }, - "diskName": { - "$ref": "#/components/schemas/Name" - }, - "diskState": { - "$ref": "#/components/schemas/DiskState" - }, - "instanceId": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "diskId", - "diskName", - "diskState", - "instanceId" - ] - }, "DiskCreate": { "description": "Create-time parameters for a [`Disk`]", "type": "object", @@ -2912,6 +3094,18 @@ "size" ] }, + "DiskIdentifier": { + "description": "Parameters for the [`Disk`] to be attached or detached to an instance", + "type": "object", + "properties": { + "disk": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "disk" + ] + }, "DiskResultsPage": { "description": "A single page of results", "type": "object", @@ -3058,6 +3252,45 @@ } ] }, + "FieldSchema": { + "description": "The name and type information for a field of a timeseries schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "source": { + "$ref": "#/components/schemas/FieldSource" + }, + "ty": { + "$ref": 
"#/components/schemas/FieldType" + } + }, + "required": [ + "name", + "source", + "ty" + ] + }, + "FieldSource": { + "description": "The source from which a field is derived, the target or metric.", + "type": "string", + "enum": [ + "Target", + "Metric" + ] + }, + "FieldType": { + "description": "The `FieldType` identifies the data type of a target or metric field.", + "type": "string", + "enum": [ + "String", + "I64", + "IpAddr", + "Uuid", + "Bool" + ] + }, "IdentityMetadata": { "description": "Identity-related metadata that's included in nearly all public API objects", "type": "object", @@ -3279,6 +3512,14 @@ "username" ] }, + "MacAddr": { + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "type": "string", + "pattern": "^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$", + "minLength": 17, + "maxLength": 17 + }, "Name": { "title": "A name used in the API", "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'.", @@ -3286,6 +3527,77 @@ "pattern": "[a-z](|[a-zA-Z0-9-]*[a-zA-Z0-9])", "maxLength": 63 }, + "NetworkInterface": { + "description": "A `NetworkInterface` represents a virtual network interface device.", + "type": "object", + "properties": { + "identity": { + "description": "common identifying metadata", + "allOf": [ + { + "$ref": "#/components/schemas/IdentityMetadata" + } + ] + }, + "instance_id": { + "description": "The Instance to which the interface belongs.", + "type": "string", + "format": "uuid" + }, + "ip": { + "description": "The IP address assigned to this interface.", + "type": "string", + "format": "ip" + }, + "mac": { + "description": "The MAC address assigned to this interface.", + "allOf": [ + { + "$ref": "#/components/schemas/MacAddr" + } + ] + }, + "subnet_id": { + "description": "The subnet to which the interface belongs.", + "type": "string", + "format": "uuid" + }, + "vpc_id": { + "description": "The VPC to which the interface belongs.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "identity", + "instance_id", + "ip", + "mac", + "subnet_id", + "vpc_id" + ] + }, + "NetworkInterfaceResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/NetworkInterface" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "Organization": { "description": "Client view of an [`Organization`]", "type": "object", @@ -3514,6 +3826,50 @@ "items" ] }, + "Role": { + "description": "Client view of a [`Role`]", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/RoleName" + } + }, + "required": [ + "description", + "name" + ] + }, + "RoleName": { + "title": "A name for a built-in role", + "description": "Role names consist of two string components separated by dot (\".\").", + "type": "string", + "pattern": "[a-z-]+\\.[a-z-]+", + "maxLength": 63 + }, + "RoleResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Role" + } + }, + "next_page": { + "nullable": true, + 
"description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "RouteDestination": { "description": "A subset of [`NetworkTarget`], `RouteDestination` specifies the kind of network traffic that will be matched to be forwarded to the [`RouteTarget`].", "oneOf": [ @@ -4037,6 +4393,61 @@ "items" ] }, + "TimeseriesName": { + "title": "The name of a timeseries", + "description": "Names are constructed by concatenating the target and metric names with ':'. Target and metric names must be lowercase alphanumeric characters with '_' separating words.", + "type": "string", + "pattern": "(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)" + }, + "TimeseriesSchema": { + "description": "The schema for a timeseries.\n\nThis includes the name of the timeseries, as well as the datum type of its metric and the schema for each field.", + "type": "object", + "properties": { + "created": { + "type": "string", + "format": "date-time" + }, + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "field_schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FieldSchema" + } + }, + "timeseries_name": { + "$ref": "#/components/schemas/TimeseriesName" + } + }, + "required": [ + "created", + "datum_type", + "field_schema", + "timeseries_name" + ] + }, + "TimeseriesSchemaResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/TimeseriesSchema" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "User": { "description": "Client view of a [`User`]", "type": "object",