import OpenAI from "openai"

// Point the OpenAI client at the aiapilab.com OpenAI-compatible endpoint
const openai = new OpenAI({
  baseURL: "https://api.aiapilab.com/v1",
  apiKey: process.env.AIAPILAB_API_KEY
})

async function main() {
  // Ask Ministral 8B for a chat completion
  const completion = await openai.chat.completions.create({
    model: "mistralai/ministral-8b",
    messages: [
      {
        role: "user",
        content: "Write a blog post about cats."
      }
    ]
  })

  // Log the assistant's reply
  console.log(completion.choices[0].message)
}

main()
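Before running the snippet, export the key so that `process.env.AIAPILAB_API_KEY` resolves, for example `export AIAPILAB_API_KEY=your_key`, then run the file with `node`, ideally Node 18 or later as recommended for the `openai` SDK.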
| Feature/Aspect | Mistral 7B | Llama 3.1 8B | Ministral 8B |
| --- | --- | --- | --- |
| Model Parameters | 7 billion | 8 billion | 8 billion |
| Attention Mechanism | Standard attention | Standard attention | Interleaved sliding-window attention |
| Maximum Context Length | 32,000 tokens | 128,000 tokens | 128,000 tokens |
| Function Calling Support | Limited | Limited | Yes (see the example below) |
| Knowledge Benchmarks | Lower than the 8B models | Comparable to other 8B models | Outperforms Llama 3.1 8B and Mistral 7B |
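Since Ministral 8B supports function calling, the same OpenAI-compatible request can carry tool definitions. The sketch below is illustrative rather than official: the `get_weather` tool, its schema, and the assumption that the aiapilab.com endpoint passes the `tools` and `tool_calls` fields through unchanged are ours.

```javascript
import OpenAI from "openai"

const openai = new OpenAI({
  baseURL: "https://api.aiapilab.com/v1",
  apiKey: process.env.AIAPILAB_API_KEY
})

async function callWithTools() {
  const completion = await openai.chat.completions.create({
    model: "mistralai/ministral-8b",
    messages: [
      { role: "user", content: "What's the weather in Paris right now?" }
    ],
    // Hypothetical tool definition: the model may choose to call it
    // instead of answering directly.
    tools: [
      {
        type: "function",
        function: {
          name: "get_weather",
          description: "Get the current weather for a city",
          parameters: {
            type: "object",
            properties: {
              city: { type: "string", description: "City name" }
            },
            required: ["city"]
          }
        }
      }
    ]
  })

  // If the model decided to call the tool, its arguments arrive as a JSON string
  const toolCalls = completion.choices[0].message.tool_calls
  if (toolCalls?.length) {
    console.log(toolCalls[0].function.name, toolCalls[0].function.arguments)
  } else {
    console.log(completion.choices[0].message.content)
  }
}

callWithTools()
```

If the model does request the tool, you would run `get_weather` yourself, append its result as a `tool` message, and send a follow-up request so the model can produce the final answer.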