# Start an interactive session with a small local model (pulls it on first use)
ollama run qwen3:0.6b
# Unload the model from memory
ollama stop qwen3:0.6b
# Remove the model from disk
ollama rm qwen3:0.6b
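A few related lifecycle commands fit alongside these (all standard ollama subcommands):
# Download a model without opening an interactive session
ollama pull qwen3:0.6b
# List models on disk
ollama list
# Show models currently loaded in memory
ollama ps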
# OpenAI-compatible chat endpoint
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "qwen3:0.6b",
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }'
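The same endpoint can stream tokens as they are generated; a minimal variant using the OpenAI-style stream flag, which Ollama's compatibility layer accepts:
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "qwen3:0.6b",
    "stream": true,
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }'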
# OpenAI-compatible (legacy) text completions endpoint
curl http://localhost:11434/v1/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "qwen3:0.6b",
    "prompt": "Write a haiku about the moon."
  }'
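Common OpenAI-style sampling parameters pass through as well; a short sketch with temperature and max_tokens:
curl http://localhost:11434/v1/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "qwen3:0.6b",
    "prompt": "Write a haiku about the moon.",
    "temperature": 0.7,
    "max_tokens": 64
  }'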
# Native generate endpoint; "stream": false returns one JSON object instead of a token stream
curl http://localhost:11434/api/generate \
  -H "Content-Type: application/json" \
  -d '{"model": "qwen3:0.6b", "prompt": "Hello", "stream": false}'
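The non-streaming reply carries the generated text in a response field; assuming jq is installed, it can be extracted directly:
curl -s http://localhost:11434/api/generate \
  -d '{"model": "qwen3:0.6b", "prompt": "Hello", "stream": false}' | jq -r '.response'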
# Native chat endpoint (streams newline-delimited JSON by default)
curl http://localhost:11434/api/chat -d '{
  "model": "qwen3:0.6b",
  "messages": [
    { "role": "user", "content": "Say hi" }
  ]
}'
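As with /api/generate, adding "stream": false collapses the output into a single JSON object, with the reply under message.content:
curl http://localhost:11434/api/chat -d '{
  "model": "qwen3:0.6b",
  "stream": false,
  "messages": [
    { "role": "user", "content": "Say hi" }
  ]
}'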
# List available models (OpenAI-compatible)
curl http://localhost:11434/v1/models
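The native equivalent, which also reports each model's size and digest, is:
curl http://localhost:11434/api/tags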
# The same chat call against other locally pulled models
curl http://localhost:11434/api/chat -d '{
  "model": "deepseek-r1:14b",
  "messages": [
    { "role": "user", "content": "Say hi" }
  ]
}'
curl http://localhost:11434/api/chat -d '{
  "model": "gpt-oss:20b",
  "messages": [
    { "role": "user", "content": "Produce a country info card for Finland" }
  ]
}'
curl http://localhost:11434/api/chat -d '{
  "model": "qwen3-coder:30b",
  "messages": [
    { "role": "user", "content": "Why is the sky blue?" }
  ]
}'
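Each of these models must be on disk before the call succeeds; pull them the same way as the small model:
ollama pull deepseek-r1:14b
ollama pull gpt-oss:20b
ollama pull qwen3-coder:30b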
Place the following as opencode.json at the root of the project directory to point opencode at the local Ollama server:
{
  "$schema": "https://opencode.ai/config.json",
  "provider": {
    "ollama": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "Ollama (local)",
      "options": {
        "baseURL": "http://localhost:11434/v1"
      },
      "models": {
        "qwen3:0.6b": { "name": "qwen3 0.6b (local)" }
      }
    }
  },
  "model": "ollama/qwen3:0.6b"
}
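With this file in place, opencode run from the project root should pick up the local provider and default to ollama/qwen3:0.6b; other locally pulled models can presumably be exposed by adding entries of the same form under "models".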