import OpenAI from "openai"
// Client for the aiapilab OpenAI-compatible endpoint.
// The API key is read from the environment rather than hard-coded;
// the original `$AIAPILAB_API_KEY` was a shell-style placeholder and
// is not valid JavaScript (ReferenceError at module load).
const openai = new OpenAI({
  baseURL: "https://api.aiapilab.com/v1",
  apiKey: process.env.AIAPILAB_API_KEY,
});
/**
 * Requests a chat completion from the `mistralai/ministral-3b` model and
 * prints the first choice's message to stdout.
 *
 * @returns {Promise<void>} resolves when the completion has been logged
 * @throws re-throws any API/network error from the SDK to the caller
 */
async function main() {
  const completion = await openai.chat.completions.create({
    model: "mistralai/ministral-3b",
    messages: [
      {
        role: "user",
        content: "Write a blog about cat.",
      },
    ],
  });
  // choices[0] is the first (and here only) generated response.
  console.log(completion.choices[0].message);
}

// Attach a rejection handler: a bare `main()` leaves a floating promise,
// so an API failure would surface as an unhandled rejection and crash
// Node with a non-descriptive error.
main().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
Feature/Aspect | Gemma 2 2B | Llama 3.2 3B | Ministral 3B |
---|---|---|---|
Parameters | 2 billion | 3 billion | 3 billion |
Ideal Use Cases | Suitable for simpler tasks but less efficient than Ministral 3B | General natural language tasks | Optimized for edge computing and on-device applications |
Maximum Context Length | 128,000 tokens | 128,000 tokens | 128,000 tokens |
Performance Benchmarks | Lower scores in multi-task evaluations compared to Ministral 3B | Strong in multilingual tasks, but slightly behind in some benchmarks | Outperforms Llama 3.2 3B and Gemma 2 2B on various tasks |
Function Calling Support | No native function calling support | Limited function calling capabilities | Yes, supports native function calling |