Overview

This page provides practical examples showing how to integrate MySafeCache into your applications. These examples demonstrate the basic cache check → LLM call → store pattern.

Basic Python Example

This example shows the complete MySafeCache workflow using Python:
import requests
import os
from typing import List, Dict, Any
class MySafeCacheClient:
    """Minimal HTTP client for the MySafeCache REST API.

    Wraps the three core endpoints: /check, /store, and /usage.
    """

    def __init__(self, api_key: str, base_url: str = "https://api.mysafecache.com/api/v1"):
        """Store credentials and pre-build the auth headers used by every request."""
        self.api_key = api_key
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

    def check_cache(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Check if a response is cached for the given messages.

        Raises:
            requests.HTTPError: on a non-2xx response.
        """
        response = requests.post(
            f"{self.base_url}/check",
            headers=self.headers,
            json={"messages": messages},
            timeout=10,  # without a timeout, requests can hang indefinitely
        )
        response.raise_for_status()
        return response.json()

    def store_response(self, messages: List[Dict[str, str]], answer: str,
                       model: str = "gpt-4", tokens_used: int = None) -> Dict[str, Any]:
        """Store a response in the cache.

        tokens_used is optional and included whenever it is provided —
        including an explicit 0.
        """
        payload = {
            "messages": messages,
            "answer": answer,
            "model": model,
        }
        # Bug fix: the original `if tokens_used:` truthiness test silently
        # dropped an explicit tokens_used=0.
        if tokens_used is not None:
            payload["tokens_used"] = tokens_used
        response = requests.post(
            f"{self.base_url}/store",
            headers=self.headers,
            json=payload,
            timeout=10,
        )
        response.raise_for_status()
        return response.json()

    def get_usage_stats(self) -> Dict[str, Any]:
        """Get cache usage statistics."""
        response = requests.get(
            f"{self.base_url}/usage",
            headers=self.headers,
            timeout=10,
        )
        response.raise_for_status()
        return response.json()
# Example usage
def main():
    """Demonstrate the cache check -> LLM call -> store workflow."""
    # Initialize client from the environment (never hard-code API keys).
    api_key = os.getenv("MYSAFECACHE_API_KEY")
    if not api_key:
        raise ValueError("MYSAFECACHE_API_KEY environment variable is required")
    client = MySafeCacheClient(api_key)

    # Example messages
    messages = [
        {"role": "user", "content": "What is artificial intelligence?"}
    ]

    # 1. Check cache
    print("🔍 Checking cache...")
    cache_result = client.check_cache(messages)
    if cache_result["cache_hit"]:
        print(f"✅ Cache hit! ({cache_result['cache_type']})")
        print(f"⏱️ Lookup time: {cache_result['lookup_time_ms']:.1f}ms")
        print(f"💰 Tokens saved: {cache_result['tokens_saved']}")
        print(f"📄 Answer: {cache_result['answer'][:100]}...")
    else:
        print(f"❌ Cache miss (lookup time: {cache_result['lookup_time_ms']:.1f}ms)")

        # 2. Simulate LLM call (replace with your actual LLM).
        # Bug fix: the original called the LLM and stored the answer even on
        # a cache hit, defeating the purpose of the cache; the JavaScript
        # example below only does this on a miss.
        print("🤖 Calling LLM...")
        simulated_answer = "Artificial intelligence (AI) refers to the simulation of human intelligence in machines..."

        # 3. Store the response
        print("💾 Storing response in cache...")
        store_result = client.store_response(messages, simulated_answer, "gpt-4", 150)
        if store_result["stored"]:
            print(f"✅ Stored in cache types: {store_result['cache_types_stored']}")
            print(f"⏱️ Storage time: {store_result['storage_time_ms']:.1f}ms")
        print(f"📄 Answer: {simulated_answer[:100]}...")

    # 4. Get usage statistics
    print("\n📊 Cache Statistics:")
    stats = client.get_usage_stats()
    print(f"Total requests: {stats['total_requests']}")
    print(f"Hit rate: {stats['hit_rate_percentage']:.1f}%")
    print(f"Average lookup time: {stats['average_lookup_time_ms']:.1f}ms")


if __name__ == "__main__":
    main()
Basic JavaScript Example

Here is the same functionality using JavaScript/Node.js:
const axios = require('axios');
class MySafeCacheClient {
  /**
   * Minimal HTTP client for the MySafeCache REST API.
   * @param {string} apiKey - Bearer token sent with every request.
   * @param {string} [baseUrl] - API root; override for self-hosted deployments.
   */
  constructor(apiKey, baseUrl = 'https://api.mysafecache.com/api/v1') {
    this.apiKey = apiKey;
    this.baseUrl = baseUrl;
    this.headers = {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json'
    };
  }

  /** Check whether a cached answer exists for the given messages. */
  async checkCache(messages) {
    try {
      const response = await axios.post(
        `${this.baseUrl}/check`,
        { messages },
        { headers: this.headers }
      );
      return response.data;
    } catch (error) {
      throw new Error(`Cache check failed: ${error.response?.data?.message || error.message}`);
    }
  }

  /**
   * Store an LLM answer in the cache.
   * tokensUsed is optional and included whenever provided — including 0.
   */
  async storeResponse(messages, answer, model = 'gpt-4', tokensUsed = null) {
    const payload = { messages, answer, model };
    // Bug fix: the original `if (tokensUsed)` truthiness test silently
    // dropped an explicit 0; `!= null` covers both null and undefined.
    if (tokensUsed != null) {
      payload.tokens_used = tokensUsed;
    }
    try {
      const response = await axios.post(
        `${this.baseUrl}/store`,
        payload,
        { headers: this.headers }
      );
      return response.data;
    } catch (error) {
      throw new Error(`Store failed: ${error.response?.data?.message || error.message}`);
    }
  }

  /** Fetch aggregate cache usage statistics. */
  async getUsageStats() {
    try {
      const response = await axios.get(
        `${this.baseUrl}/usage`,
        { headers: this.headers }
      );
      return response.data;
    } catch (error) {
      throw new Error(`Usage stats failed: ${error.response?.data?.message || error.message}`);
    }
  }
}
// Example usage: check the cache, fall back to the LLM on a miss, then
// store the answer and report aggregate statistics.
async function main() {
  const apiKey = process.env.MYSAFECACHE_API_KEY;
  if (!apiKey) {
    throw new Error('MYSAFECACHE_API_KEY environment variable is required');
  }

  const client = new MySafeCacheClient(apiKey);
  const messages = [{ role: 'user', content: 'What is artificial intelligence?' }];

  try {
    // 1. Check cache
    console.log('🔍 Checking cache...');
    const lookup = await client.checkCache(messages);

    if (lookup.cache_hit) {
      console.log(`✅ Cache hit! (${lookup.cache_type})`);
      console.log(`⏱️ Lookup time: ${lookup.lookup_time_ms.toFixed(1)}ms`);
      console.log(`💰 Tokens saved: ${lookup.tokens_saved}`);
      console.log(`📄 Answer: ${lookup.answer.substring(0, 100)}...`);
    } else {
      console.log(`❌ Cache miss (lookup time: ${lookup.lookup_time_ms.toFixed(1)}ms)`);

      // 2. Simulate LLM call
      console.log('🤖 Calling LLM...');
      const simulatedAnswer = 'Artificial intelligence (AI) refers to the simulation of human intelligence in machines...';

      // 3. Store the response
      console.log('💾 Storing response in cache...');
      const saved = await client.storeResponse(messages, simulatedAnswer, 'gpt-4', 150);
      if (saved.stored) {
        console.log(`✅ Stored in cache types: ${saved.cache_types_stored.join(', ')}`);
        console.log(`⏱️ Storage time: ${saved.storage_time_ms.toFixed(1)}ms`);
      }
      console.log(`📄 Answer: ${simulatedAnswer.substring(0, 100)}...`);
    }

    // 4. Get usage statistics
    console.log('\n📊 Cache Statistics:');
    const stats = await client.getUsageStats();
    console.log(`Total requests: ${stats.total_requests}`);
    console.log(`Hit rate: ${stats.hit_rate_percentage.toFixed(1)}%`);
    console.log(`Average lookup time: ${stats.average_lookup_time_ms.toFixed(1)}ms`);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

// Run the example
main().catch(console.error);
Basic cURL Examples

For testing or shell scripting, here are basic cURL examples.

Check Cache
#!/bin/bash
API_KEY="your-api-key-here"
BASE_URL="https://api.mysafecache.com/api/v1"

# POST helper: endpoint path in $1, JSON body in $2.
post_json() {
  curl -s -X POST "$BASE_URL/$1" \
    -H "Authorization: Bearer $API_KEY" \
    -H "Content-Type: application/json" \
    -d "$2"
}

# Check cache
CACHE_RESPONSE=$(post_json check '{
  "messages": [
    {
      "role": "user",
      "content": "What is Docker?"
    }
  ]
}')
echo "Cache check response:"
echo "$CACHE_RESPONSE" | jq '.'

# Check if it's a cache hit
CACHE_HIT=$(echo "$CACHE_RESPONSE" | jq -r '.cache_hit')
if [ "$CACHE_HIT" = "true" ]; then
  echo "✅ Cache hit!"
  ANSWER=$(echo "$CACHE_RESPONSE" | jq -r '.answer')
  echo "Answer: $ANSWER"
else
  echo "❌ Cache miss - would call LLM here"
  # Store a response (simulate LLM call)
  echo "💾 Storing simulated response..."
  STORE_RESPONSE=$(post_json store '{
    "messages": [
      {
        "role": "user",
        "content": "What is Docker?"
      }
    ],
    "answer": "Docker is a containerization platform that allows developers to package applications and their dependencies into lightweight, portable containers.",
    "model": "gpt-4",
    "tokens_used": 150
  }')
  echo "Store response:"
  echo "$STORE_RESPONSE" | jq '.'
fi
Get Usage Statistics
#!/bin/bash
API_KEY="your-api-key-here"
BASE_URL="https://api.mysafecache.com/api/v1"

# Get usage statistics
USAGE_RESPONSE=$(
  curl -s -X GET "$BASE_URL/usage" \
    -H "Authorization: Bearer $API_KEY"
)

echo "📊 Usage Statistics:"
echo "$USAGE_RESPONSE" | jq '.'

# Extract specific metrics (herestrings avoid an extra echo pipeline).
TOTAL_REQUESTS=$(jq -r '.total_requests' <<< "$USAGE_RESPONSE")
HIT_RATE=$(jq -r '.hit_rate_percentage' <<< "$USAGE_RESPONSE")
AVG_LOOKUP_TIME=$(jq -r '.average_lookup_time_ms' <<< "$USAGE_RESPONSE")

echo ""
echo "Summary:"
echo "Total requests: $TOTAL_REQUESTS"
echo "Hit rate: $HIT_RATE%"
echo "Average lookup time: ${AVG_LOOKUP_TIME}ms"
Error Handling Examples

Python Error Handling
import requests
from requests.exceptions import RequestException
def robust_cache_check(messages, api_key, max_retries=3):
    """Cache check with error handling and retries.

    Retries timeouts, network errors, and 429 rate limits (with exponential
    backoff); fails fast on 401 and other HTTP errors.

    Raises:
        Exception: invalid API key, unexpected HTTP status, or all retries
            exhausted. Always raises rather than returning None.
    """
    import time  # used for backoff; local so the snippet stays self-contained

    for attempt in range(max_retries):
        try:
            response = requests.post(
                "https://api.mysafecache.com/api/v1/check",
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json"
                },
                json={"messages": messages},
                timeout=10  # 10 second timeout
            )
            # Check for HTTP errors
            response.raise_for_status()
            return response.json()
        except requests.exceptions.Timeout:
            print(f"⏰ Timeout on attempt {attempt + 1}")
            if attempt == max_retries - 1:
                raise Exception("Cache check timed out after all retries")
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 401:
                raise Exception("Invalid API key")
            elif e.response.status_code == 429:
                print(f"⚠️ Rate limited on attempt {attempt + 1}")
                # Bug fix: the original slept and then fell off the end of
                # the loop on the final attempt, silently returning None.
                if attempt == max_retries - 1:
                    raise Exception("Rate limited after all retries")
                time.sleep(2 ** attempt)  # Exponential backoff
            else:
                raise Exception(f"HTTP error: {e.response.status_code}")
        except RequestException as e:
            print(f"🔌 Network error on attempt {attempt + 1}: {e}")
            if attempt == max_retries - 1:
                raise Exception("Network error after all retries")
# Usage with error handling
# NOTE(review): `messages` and `api_key` are assumed to be defined earlier
# in the calling script — this snippet does not define them; verify before
# copy-pasting.
try:
    result = robust_cache_check(messages, api_key)
    if result["cache_hit"]:
        print("Cache hit!")
    else:
        print("Cache miss - calling LLM...")
except Exception as e:
    print(f"❌ Error: {e}")
    # Fallback to direct LLM call
JavaScript Error Handling
// Cache check with retries: timeouts, network failures and 429s are retried
// with exponential backoff; 401 and other HTTP errors abort immediately.
async function robustCacheCheck(messages, apiKey, maxRetries = 3) {
  let attempt = 0;
  while (attempt < maxRetries) {
    try {
      const res = await fetch('https://api.mysafecache.com/api/v1/check', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${apiKey}`,
          'Content-Type': 'application/json'
        },
        body: JSON.stringify({ messages }),
        signal: AbortSignal.timeout(10000) // 10 second timeout
      });

      if (res.status === 401) {
        throw new Error('Invalid API key');
      }
      if (res.status === 429) {
        console.log(`⚠️ Rate limited on attempt ${attempt + 1}`);
        if (attempt < maxRetries - 1) {
          // Exponential backoff before the next try.
          await new Promise(resolve => setTimeout(resolve, Math.pow(2, attempt) * 1000));
          attempt++;
          continue;
        }
        // Final attempt: fall through to the !res.ok throw below.
      }
      if (!res.ok) {
        throw new Error(`HTTP ${res.status}: ${res.statusText}`);
      }
      return await res.json();
    } catch (err) {
      if (err.name === 'TimeoutError') {
        console.log(`⏰ Timeout on attempt ${attempt + 1}`);
      } else if (err.name === 'TypeError') {
        console.log(`🔌 Network error on attempt ${attempt + 1}`);
      }
      if (attempt === maxRetries - 1) {
        throw new Error(`Failed after ${maxRetries} attempts: ${err.message}`);
      }
    }
    attempt++;
  }
}
// Usage with error handling
// NOTE(review): top-level `await` only works in an ES module — wrap this in
// an async function otherwise. `messages` and `apiKey` are assumed to be
// defined earlier; this snippet does not define them.
try {
  const result = await robustCacheCheck(messages, apiKey);
  if (result.cache_hit) {
    console.log('Cache hit!');
  } else {
    console.log('Cache miss - calling LLM...');
  }
} catch (error) {
  console.error('❌ Error:', error.message);
  // Fallback to direct LLM call
}
Testing Your Integration

Here is a simple test script to verify your MySafeCache integration:
def test_mysafecache_integration(api_key):
    """Test basic MySafeCache functionality.

    Walks the full miss -> store -> hit -> stats cycle against the live
    API and returns True when every step succeeds, False otherwise.
    """
    client = MySafeCacheClient(api_key)
    test_messages = [{"role": "user", "content": "Test message for integration"}]
    print("🧪 Testing MySafeCache integration...")

    # Test 1: Check cache (should be miss for new message)
    print("\n1. Testing cache check...")
    try:
        first_check = client.check_cache(test_messages)
        if not first_check["cache_hit"]:
            print("✅ Cache miss as expected")
        else:
            print("⚠️ Unexpected cache hit (cache may be warm)")
    except Exception as e:
        print(f"❌ Cache check failed: {e}")
        return False

    # Test 2: Store response
    print("\n2. Testing response storage...")
    try:
        stored = client.store_response(
            test_messages,
            "This is a test response",
            "gpt-4",
            10
        )
        if not stored["stored"]:
            print("❌ Failed to store response")
            return False
        print("✅ Response stored successfully")
    except Exception as e:
        print(f"❌ Store failed: {e}")
        return False

    # Test 3: Check cache again (should be hit now)
    print("\n3. Testing cache hit...")
    try:
        second_check = client.check_cache(test_messages)
        if not second_check["cache_hit"]:
            print("❌ Expected cache hit but got miss")
            return False
        print("✅ Cache hit as expected")
        print(f"   Cache type: {second_check['cache_type']}")
        print(f"   Lookup time: {second_check['lookup_time_ms']:.1f}ms")
    except Exception as e:
        print(f"❌ Second cache check failed: {e}")
        return False

    # Test 4: Get usage stats
    print("\n4. Testing usage statistics...")
    try:
        usage = client.get_usage_stats()
        print(f"✅ Usage stats retrieved")
        print(f"   Total requests: {usage['total_requests']}")
        print(f"   Hit rate: {usage['hit_rate_percentage']:.1f}%")
    except Exception as e:
        print(f"❌ Usage stats failed: {e}")
        return False

    print("\n🎉 All tests passed! MySafeCache integration is working correctly.")
    return True
# Run the test only when executed directly, never on import.
if __name__ == "__main__":
    key = os.getenv("MYSAFECACHE_API_KEY")
    if not key:
        print("❌ MYSAFECACHE_API_KEY environment variable not set")
    else:
        test_mysafecache_integration(key)