privatemode json mode
bash privatemode-ai/conversation-json-mode.sh "Best Books?" http://192.168.3.10:9876

Testing API endpoint...
API endpoint validated - required model found
Chat completion output: {"type": "function", "name": "get_best_books", "parameters": {}}
Request-to-response time: 1 seconds

Expected: a JSON object following the results_list schema defined in the request below, rather than the function-call-style object shown above.
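For reference, a response content that actually conformed to that schema would look roughly like the following (the titles and percentages are made-up placeholders, not real model output):

    {
      "results": [
        { "result_text": "Example book title A", "accuracy_percent": 95 },
        { "result_text": "Example book title B", "accuracy_percent": 80 }
      ]
    }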

#!/bin/bash
usage() {
    echo "Usage: $0 <prompt> <api_url>"
    echo "Example: $0 \"What is the meaning of life?\" http://localhost:8080"
    echo ""
    echo "Parameters:"
    echo "  prompt   - The prompt/question to send to the AI"
    echo "  api_url  - API URL for the service (e.g., http://localhost:8080)"
    exit 1
}

cleanup() {
    rm -f "$TEMP_JSON_FILE"
}
trap cleanup EXIT

if [ "$#" -ne 2 ]; then
    usage
fi
PROMPT=$1
API_URL=$2
# Remove trailing slash if present
API_URL=${API_URL%/}
# Escape the prompt for JSON (printf avoids embedding a trailing newline in the string)
PROMPT_JSON=$(printf '%s' "$PROMPT" | jq -Rs .)
TEMP_JSON_FILE=$(mktemp)
cat > "$TEMP_JSON_FILE" <<EOF
{
  "model": "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4",
  "messages": [
    {
      "role": "user",
      "content": [
        {
          "type": "text",
          "text": $PROMPT_JSON
        }
      ]
    }
  ],
  "response_format": {
    "type": "json_object",
    "schema": {
      "name": "results_list",
      "schema": {
        "type": "object",
        "properties": {
          "results": {
            "type": "array",
            "description": "A list of results, each containing text and accuracy percentage.",
            "items": {
              "type": "object",
              "properties": {
                "result_text": {
                  "type": "string",
                  "description": "The text of the result."
                },
                "accuracy_percent": {
                  "type": "number",
                  "description": "The accuracy of this result represented as a percentage."
                }
              },
              "required": [
                "result_text",
                "accuracy_percent"
              ],
              "additionalProperties": false
            }
          }
        },
        "required": [
          "results"
        ],
        "additionalProperties": false
      },
      "strict": true
    }
  }
}
EOF
timestamp=$(date +"%Y%m%d_%H%M%S")
# Ensure the logging directory exists before anything is written to it
mkdir -p logging
# Validate JSON syntax
if ! jq empty "$TEMP_JSON_FILE" 2>/dev/null; then
    echo "Error: Generated JSON is invalid"
    echo "See logging/$timestamp.privatemode_request_json_error.json for the JSON content"
    cat "$TEMP_JSON_FILE" > "logging/$timestamp.privatemode_request_json_error.json"
    exit 1
fi
start_time=$(date +%s)
# Test that the API endpoint is reachable and exposes the required model
echo "Testing API endpoint..."
models_response=$(curl -s "$API_URL/v1/models")
if ! echo "$models_response" | jq -e '.data[] | select(.id == "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4")' > /dev/null; then
    echo "Error: Required model 'ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4' not found in API response"
    echo "Available models:"
    echo "$models_response" | jq -r '.data[].id // "No models found"'
    exit 1
fi
echo "API endpoint validated - required model found"
response=$(curl -s "$API_URL/v1/chat/completions" \
    -H "Content-Type: application/json" \
    -d @"$TEMP_JSON_FILE")
end_time=$(date +%s)
response_time=$((end_time - start_time))
# echo "Response from API:"
# echo "$response"
echo "$response" | jq > "logging/$timestamp.privatemode_conversation_response.json"
cat "$TEMP_JSON_FILE" > "logging/$timestamp.privatemode_conversation_request.json"
# Extract and display the content
content=$(echo "$response" | jq -r '.choices[0].message.content // "ERROR from script: No content found"')
echo "Chat completion output: $content"
echo "Request-to-response time: ${response_time} seconds"
# Also save the content to a markdown file
echo "$content" > "logging/$timestamp.privatemode_conversation_response.md"