Create a chat completion using various AI models
curl --request POST \
  --url https://router.requesty.ai/v1/chat/completions \
  --header 'Authorization: Bearer <token>' \
  --header 'Content-Type: application/json' \
  --data '{
    "model": "openai/gpt-4o-mini",
    "messages": [
      {
        "role": "user",
        "content": "<string>",
        "name": "<string>"
      }
    ],
    "max_tokens": 256,
    "temperature": 0.7,
    "top_p": 1,
    "stream": false,
    "tools": [
      {
        "type": "function",
        "function": {
          "name": "<string>",
          "description": "<string>",
          "parameters": {}
        }
      }
    ],
    "tool_choice": "<string>",
    "response_format": {}
  }'
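For convenience, the same request can be made from Python. The sketch below is illustrative rather than an official client: it assumes the third-party requests package is installed and that the API key is stored in a REQUESTY_API_KEY environment variable (that variable name is an assumption, not part of this reference).

import os

import requests  # third-party HTTP client, assumed installed

url = "https://router.requesty.ai/v1/chat/completions"
headers = {
    "Authorization": f"Bearer {os.environ['REQUESTY_API_KEY']}",  # hypothetical env var
    "Content-Type": "application/json",
}
payload = {
    "model": "openai/gpt-4o-mini",
    "messages": [{"role": "user", "content": "Hello! What can you do?"}],
    "max_tokens": 256,
    "temperature": 0.7,
}

resp = requests.post(url, headers=headers, json=payload, timeout=60)
resp.raise_for_status()  # raise on non-2xx responses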
{
  "id": "<string>",
  "object": "<string>",
  "created": 123,
  "model": "<string>",
  "usage": {
    "prompt_tokens": 123,
    "completion_tokens": 123,
    "total_tokens": 123
  },
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "<string>",
        "name": "<string>"
      },
      "finish_reason": "<string>"
    }
  ]
}
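Reading the fields above out of the parsed response is straightforward. A minimal sketch, continuing the resp object from the Python example earlier (the finish_reason values named in the comment are common ones, not an exhaustive list):

data = resp.json()
choice = data["choices"][0]
print(choice["message"]["content"])  # the assistant's reply text
print(choice["finish_reason"])       # commonly "stop" or "length"
usage = data["usage"]
print(f"{usage['prompt_tokens']} prompt + {usage['completion_tokens']} completion "
      f"= {usage['total_tokens']} total tokens")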
Authorization: API key for authentication, sent as a Bearer token.
Response: Chat completion response. The response is of type object, as shown above.
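The tools, tool_choice, and response_format fields in the request body follow the OpenAI-style function-calling shape shown above. A minimal sketch of a payload that registers one hypothetical tool (the get_weather name and its parameter schema are illustrative, not defined by this API):

payload = {
    "model": "openai/gpt-4o-mini",
    "messages": [{"role": "user", "content": "What's the weather in Paris?"}],
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool name
                "description": "Look up the current weather for a city",
                "parameters": {  # JSON Schema describing the tool's arguments
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    "tool_choice": "auto",  # let the model decide whether to call the tool
}

If the model decides to call the tool, OpenAI-compatible routers typically return the call name and JSON-encoded arguments on the assistant message instead of plain content; check the actual response body for the exact shape before relying on it.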