Create a Response

POST /responses

Example request (cURL):
curl --request POST \
  --url https://api.portkey.ai/v1/responses \
  --header 'Content-Type: application/json' \
  --header 'x-portkey-api-key: <api-key>' \
  --data '{
    "metadata": {},
    "temperature": 1,
    "top_p": 1,
    "user": "user-1234",
    "previous_response_id": "<string>",
    "model": "gpt-4o",
    "reasoning": {
      "effort": "medium",
      "generate_summary": "concise"
    },
    "max_output_tokens": 123,
    "instructions": "<string>",
    "text": {
      "format": {
        "type": "text"
      }
    },
    "tools": [
      {
        "type": "file_search",
        "vector_store_ids": [
          "<string>"
        ],
        "max_num_results": 123,
        "filters": {
          "type": "eq",
          "key": "<string>",
          "value": "<string>"
        },
        "ranking_options": {
          "ranker": "auto",
          "score_threshold": 0
        }
      }
    ],
    "tool_choice": "none",
    "truncation": "disabled",
    "input": "<string>",
    "include": [
      "file_search_call.results"
    ],
    "parallel_tool_calls": true,
    "store": true,
    "stream": false
  }'
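The same call can be made from any HTTP client. Below is a minimal Python sketch using the requests library; the URL, headers, and body fields mirror the cURL example above, and the API key value is a placeholder you must replace.

import requests

PORTKEY_API_KEY = "<api-key>"  # placeholder: your Portkey API key

# Only a subset of the body fields from the example above is sent here.
payload = {
    "model": "gpt-4o",
    "input": "Write a one-sentence summary of the Responses API.",
    "max_output_tokens": 256,
    "store": True,
    "stream": False,
}

resp = requests.post(
    "https://api.portkey.ai/v1/responses",
    headers={
        "Content-Type": "application/json",
        "x-portkey-api-key": PORTKEY_API_KEY,
    },
    json=payload,
    timeout=60,
)
resp.raise_for_status()
response = resp.json()
print(response["id"], response["status"])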
Example response (200 OK):

{
  "metadata": {},
  "temperature": 1,
  "top_p": 1,
  "user": "user-1234",
  "previous_response_id": "<string>",
  "model": "gpt-4o",
  "reasoning": {
    "effort": "medium",
    "generate_summary": "concise"
  },
  "max_output_tokens": 123,
  "instructions": "<string>",
  "text": {
    "format": {
      "type": "text"
    }
  },
  "tools": [
    {
      "type": "file_search",
      "vector_store_ids": [
        "<string>"
      ],
      "max_num_results": 123,
      "filters": {
        "type": "eq",
        "key": "<string>",
        "value": "<string>"
      },
      "ranking_options": {
        "ranker": "auto",
        "score_threshold": 0
      }
    }
  ],
  "tool_choice": "none",
  "truncation": "disabled",
  "id": "<string>",
  "object": "response",
  "status": "completed",
  "created_at": 123,
  "error": {
    "code": "server_error",
    "message": "<string>"
  },
  "incomplete_details": {
    "reason": "max_output_tokens"
  },
  "output": [
    {
      "id": "<string>",
      "type": "message",
      "role": "assistant",
      "content": [
        {
          "type": "output_text",
          "text": "<string>",
          "annotations": [
            {
              "type": "file_citation",
              "index": 123,
              "file_id": "<string>"
            }
          ]
        }
      ],
      "status": "in_progress"
    }
  ],
  "output_text": "<string>",
  "usage": {
    "input_tokens": 123,
    "input_tokens_details": {
      "cached_tokens": 123
    },
    "output_tokens": 123,
    "output_tokens_details": {
      "reasoning_tokens": 123
    },
    "total_tokens": 123
  },
  "parallel_tool_calls": true
}
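Reading the result follows directly from the response shape above. The sketch below uses only field names that appear in the example response; the helper name itself is hypothetical.

def extract_text(response: dict) -> str:
    # Prefer the convenience field when present, otherwise walk the output items
    if response.get("output_text"):
        return response["output_text"]
    parts = []
    for item in response.get("output", []):
        if item.get("type") == "message":
            for content in item.get("content", []):
                if content.get("type") == "output_text":
                    parts.append(content["text"])
    return "".join(parts)

# Usage with the parsed JSON from the request sketch above:
# text = extract_text(response)
# usage = response.get("usage", {})
# print(text, usage.get("total_tokens"))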
Authorizations

Requests are authenticated with the x-portkey-api-key header, as shown in the example request above.

Body

application/json

Response

200 - application/json

OK. The response is of type object.
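The request body also accepts a stream flag. This page does not document the streaming wire format, but OpenAI-compatible Responses endpoints typically emit server-sent events when stream is true. The Python sketch below is written under that assumption (SSE lines prefixed with "data:", an event object per line); treat the event handling as illustrative, not authoritative.

import json
import requests

with requests.post(
    "https://api.portkey.ai/v1/responses",
    headers={
        "Content-Type": "application/json",
        "x-portkey-api-key": "<api-key>",
    },
    json={"model": "gpt-4o", "input": "Hello", "stream": True},
    stream=True,
    timeout=60,
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        # Assumption: events arrive as SSE "data: {...}" lines; skip keep-alives
        if not line or not line.startswith("data:"):
            continue
        data = line[len("data:"):].strip()
        if data == "[DONE]":
            break
        event = json.loads(data)
        print(event.get("type"))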