use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, ItemEnum, Lit, DeriveInput, Fields, Data};

#[proc_macro_derive(HttpRequest, attributes(http_get))]
pub fn derive_http_get_request(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = &input.ident;

    // Parse #[http_get(url = "...")] attribute
    let mut base_url = None;
    for attr in &input.attrs {
        if attr.path().is_ident("http_get") {
            let _ = attr.parse_nested_meta(|meta| {
                if meta.path.is_ident("url") {
                    let value: Lit = meta.value()?.parse()?;
                    if let Lit::Str(litstr) = value {
                        base_url = Some(litstr.value());
                    }
                }
                Ok(())
            });
        }
    }

    let base_url = base_url.expect("Missing #[http_get(url = \"...\")] attribute");

    let expanded = match &input.data {
        Data::Struct(data_struct) => {
            let fields = match &data_struct.fields {
                Fields::Named(named) => &named.named,
                _ => panic!("#[derive(HttpRequest)] only supports structs with named fields"),
            };

            // Fields prefixed with `lnk_p_` become query parameters keyed by the
            // remainder of the field name.
            let mut query_param_code = Vec::new();
            for field in fields {
                let ident = field.ident.clone().unwrap();
                let field_name = ident.to_string();
                if field_name.starts_with("lnk_p_") {
                    let key = &field_name["lnk_p_".len()..];
                    query_param_code.push(quote! {
                        query_params.push((#key.to_string(), self.#ident.to_string()));
                    });
                }
            }

            quote! {
                impl #name {
                    pub async fn send(
                        &self,
                        client: std::sync::Arc<actix_web::Client>,
                        headers: Option<Vec<(&str, &str)>>,
                        api_key: Option<&str>,
                    ) -> Result<actix_web::ClientResponse, actix_web::error::SendRequestError> {
                        use urlencoding::encode;

                        let mut query_params: Vec<(String, String)> = Vec::new();
                        #(#query_param_code)*

                        if let Some(key) = api_key {
                            query_params.push(("api_key".to_string(), key.to_string()));
                        }

                        let mut url = #base_url.to_string();
                        if !query_params.is_empty() {
                            let query_parts: Vec<String> = query_params.iter()
                                .map(|(k, v)| format!("{}={}", k, encode(v)))
                                .collect();
                            url.push('?');
                            url.push_str(&query_parts.join("&"));
                        }

                        let mut request = client.get(url);

                        if let Some(hdrs) = headers {
                            for (k, v) in hdrs {
                                request = request.append_header((k, v));
                            }
                        }

                        let response = request.send().await?;
                        Ok(response)
                    }
                }
            }
        }

        Data::Enum(data_enum) => {
            let mut variant_arms = Vec::new();
            for variant in &data_enum.variants {
                let vname = &variant.ident;
                match &variant.fields {
                    Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {
                        variant_arms.push(quote! {
                            #name::#vname(inner) => inner.send(client.clone(), headers.clone(), api_key).await,
                        });
                    }
                    _ => panic!("#[derive(HttpRequest)] enum variants must have a single unnamed field"),
                }
            }

            quote! {
                impl #name {
                    pub async fn send(
                        &self,
                        client: std::sync::Arc<actix_web::Client>,
                        headers: Option<Vec<(&str, &str)>>,
                        api_key: Option<&str>,
                    ) -> Result<actix_web::ClientResponse, actix_web::error::SendRequestError> {
                        match self {
                            #(#variant_arms)*
                        }
                    }
                }
            }
        }

        _ => panic!("#[derive(HttpRequest)] only supports structs and enums"),
    };

    TokenStream::from(expanded)
}
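
// Illustrative usage sketch (not compiled here; the type, URL, and field names are
// hypothetical). Fields prefixed with `lnk_p_` become query parameters keyed by the
// remainder of the field name, and `api_key` is appended when one is passed, so the
// generated `send` would GET a URL like
// "https://example.com/v2/quotes?symbol=AAPL&api_key=<key>":
//
// #[derive(HttpRequest)]
// #[http_get(url = "https://example.com/v2/quotes")]
// struct QuoteRequest {
//     lnk_p_symbol: String, // becomes the `symbol` query parameter
//     note: String,         // no `lnk_p_` prefix, so it is not sent
// }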
#[proc_macro_derive(HttpResponse)]
pub fn derive_http_response(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = &input.ident;

    let expanded = quote! {
        impl #name {
            pub async fn receive(resp: actix_web::ClientResponse) -> Result<Self, actix_web::error::JsonPayloadError> {
                resp.json().await
            }
        }
    };

    TokenStream::from(expanded)
}
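
// Illustrative usage sketch (hypothetical `QuoteResponse` type): the generated
// `receive` deserializes the response body as JSON into `Self`, so the type also
// needs to derive `serde::Deserialize`.
//
// #[derive(HttpResponse, serde::Deserialize)]
// struct QuoteResponse {
//     price: f64,
// }
//
// let resp = request.send(client.clone(), None, Some(&api_key)).await?;
// let quote = QuoteResponse::receive(resp).await?;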
#[proc_macro_derive(SendVec)]
pub fn derive_send_vec(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = &input.ident;

    let expanded = quote! {
        impl #name {
            /// Sends all items in the vec sequentially, awaiting each.
            pub async fn send_vec(
                items: Vec<Self>,
                client: std::sync::Arc<actix_web::Client>,
                headers: Option<Vec<(&str, &str)>>,
                api_key: Option<&str>,
            ) -> Result<Vec<actix_web::ClientResponse>, actix_web::error::SendRequestError> {
                let mut responses = Vec::with_capacity(items.len());
                for item in items {
                    let resp = item.send(client.clone(), headers.clone(), api_key).await?;
                    responses.push(resp);
                }
                Ok(responses)
            }
        }
    };

    TokenStream::from(expanded)
}
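
// Illustrative usage sketch (hypothetical `QuoteRequest` type deriving both
// `HttpRequest` and `SendVec`): `send_vec` calls the generated `send` on each item
// in order and collects the responses.
//
// let responses =
//     QuoteRequest::send_vec(requests, client.clone(), None, Some(&api_key)).await?;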
#[proc_macro_derive(ResponseVec)]
pub fn derive_response_vec(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = &input.ident;

    let expanded = quote! {
        impl #name {
            /// Deserializes all responses sequentially into a Vec<Self>.
            /// Assumes `Self` implements `DeserializeOwned`.
            pub async fn response_vec(
                responses: Vec<actix_web::ClientResponse>,
            ) -> Result<Vec<Self>, actix_web::error::JsonPayloadError>
            where
                Self: Sized + serde::de::DeserializeOwned,
            {
                let mut results = Vec::with_capacity(responses.len());
                for resp in responses {
                    let item = resp.json::<Self>().await?;
                    results.push(item);
                }
                Ok(results)
            }
        }
    };

    TokenStream::from(expanded)
}
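
// Illustrative usage sketch (hypothetical `QuoteResponse` type): pairs with `SendVec`,
// deserializing each response body in order.
//
// let quotes: Vec<QuoteResponse> = QuoteResponse::response_vec(responses).await?;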
#[proc_macro_attribute]
pub fn alpaca_cli(_attr: TokenStream, item: TokenStream) -> TokenStream {
    let input_enum = parse_macro_input!(item as ItemEnum);
    let enum_name = &input_enum.ident;
    let variants = &input_enum.variants;

    // Match arms for regular command variants (Single, Asset, etc.)
    let regular_arms = variants.iter().filter_map(|v| {
        let v_name = &v.ident;

        // Skip the Bulk variant; it is handled separately below.
        if v_name == "Bulk" {
            return None;
        }

        Some(quote! {
            #enum_name::#v_name(req) => {
                let res = req.send(client.clone(), &api_key).await?;
                let body = res.body().await?;
                println!("{}", std::str::from_utf8(&body)?);
            }
        })
    });

    let expanded = quote! {
        #[derive(structopt::StructOpt, Debug)]
        #input_enum

        #[tokio::main]
        async fn main() -> Result<(), Box<dyn std::error::Error>> {
            use structopt::StructOpt;
            use std::fs::File;
            use std::io::{BufReader, Read};
            use std::sync::Arc;
            use std::thread;

            const THREADS: usize = 4;

            // Initialize shared HTTP client and API key
            let client = Arc::new(actix_web::Client::default());
            let api_key = std::env::var("APCA_API_KEY_ID")?;
            let cmd = #enum_name::from_args();

            match cmd {
                #(#regular_arms)*

                #enum_name::Bulk { input } => {
                    // Choose input source: file or stdin
                    let mut reader: Box<dyn Read> = match input {
                        Some(path) => Box::new(BufReader::new(File::open(path)?)),
                        None => Box::new(std::io::stdin()),
                    };

                    // Read input JSON into buffer
                    let mut buf = String::new();
                    reader.read_to_string(&mut buf)?;

                    // Deserialize into Vec<Query>
                    let queries: Vec<Query> = serde_json::from_str(&buf)?;
                    let total = queries.len();

                    if total == 0 {
                        eprintln!("No queries provided.");
                        return Ok(());
                    }

                    let shared_queries = Arc::new(queries);
                    let shared_key = Arc::new(api_key);

                    // Split the queries evenly across worker threads.
                    let per_thread = total / THREADS;

                    let mut handles = Vec::with_capacity(THREADS);
                    for i in 0..THREADS {
                        let queries_clone = Arc::clone(&shared_queries);
                        let client_clone = Arc::clone(&client);
                        let key_clone = Arc::clone(&shared_key);

                        let start_index = i * per_thread;
                        let end_index = if i == THREADS - 1 {
                            total // Last thread gets the remainder
                        } else {
                            start_index + per_thread
                        };
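                        // e.g. with total = 10 and THREADS = 4: per_thread = 2, so the
                        // threads cover indices 0..2, 2..4, 4..6, and 6..10 (the last
                        // thread picks up the remainder).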

                        let handle = thread::spawn(move || {
                            let rt = tokio::runtime::Runtime::new().expect("Failed to create runtime");
                            for idx in start_index..end_index {
                                let query = &queries_clone[idx];
                                let send_result = rt.block_on(query.send(client_clone.clone(), &key_clone));

                                match send_result {
                                    Ok(response) => {
                                        let body_result = rt.block_on(response.body());
                                        match body_result {
                                            Ok(body) => println!("{}", String::from_utf8_lossy(&body)),
                                            Err(e) => eprintln!("Error reading response body: {:?}", e),
                                        }
                                    }
                                    Err(e) => {
                                        eprintln!("Request failed: {:?}", e);
                                    }
                                }
                            }
                        });

                        handles.push(handle);
                    }

                    // Wait for all threads to complete
                    for handle in handles {
                        handle.join().expect("A thread panicked");
                    }
                }
            }

            Ok(())
        }
    };

    TokenStream::from(expanded)
}
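
// Illustrative usage sketch (hypothetical variant and request types): the attribute
// turns the enum into a StructOpt CLI and generates a `main` that dispatches each
// variant's request, with `Bulk` reading a JSON array of `Query` values from a file
// or stdin and fanning the requests out across threads. The expansion assumes a
// `Query` type is in scope at the call site.
//
// #[alpaca_cli]
// enum Command {
//     Single(SingleQuoteRequest),
//     Asset(AssetRequest),
//     Bulk { input: Option<std::path::PathBuf> },
// }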