from portkey_ai import Portkey

# Initialize the Portkey client configured for Vertex AI batch file uploads.
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@VERTEX_PROVIDER",
    vertex_storage_bucket_name="your_bucket_name",  # GCS bucket to upload into
    provider_file_name="your_file_name.jsonl",  # Target file name in GCS
    provider_model="gemini-1.5-flash-001",  # Model the batch file targets
)

# Upload a file for batch inference.
# Use a context manager so the file handle is closed even if the upload raises
# (the original left the handle open — a resource leak).
with open("dataset.jsonl", "rb") as dataset:
    file = portkey.files.create(
        file=dataset,
        purpose="batch",
    )

print(file)
from portkey_ai import Portkey

# Set up the Portkey client pointing at the Vertex AI provider integration.
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@VERTEX_PROVIDER",
)

# Kick off a batch inference job over the previously uploaded file.
batch_job = portkey.batches.create(
    input_file_id="<file_id>",  # File ID returned by the upload step
    endpoint="/v1/chat/completions",  # API endpoint each request is sent to
    completion_window="24h",  # Window within which the batch must complete
    model="gemini-1.5-flash-001",
)

print(batch_job)
from portkey_ai import Portkey

# Set up the Portkey client pointing at the Vertex AI provider integration.
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@VERTEX_PROVIDER",
)

# Fetch the batch jobs for this provider.
# `limit` is optional; when omitted the API returns up to 20 jobs.
jobs = portkey.batches.list(limit=10)

print(jobs)
from portkey_ai import Portkey

# Set up the Portkey client pointing at the Vertex AI provider integration.
portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # Replace with your Portkey API key
    provider="@VERTEX_PROVIDER",
)

# Look up a single batch job by its ID to inspect status and results.
job = portkey.batches.retrieve("job_id")

print(job)