┌─────────┐          ┌─────────┐          ┌──────────┐
│ Client  │  ──────> │ Your    │  ──────> │ Addis AI │
│ App/Web │          │ Server  │          │ API      │
└─────────┘  <────── └─────────┘  <────── └──────────┘
npm install express axios cors dotenv multer form-data
Create a .env file for your API key:
ADDIS_AI_API_KEY=your_api_key_here
Then create your app.js file:
const express = require("express");
const axios = require("axios");
const cors = require("cors");
const dotenv = require("dotenv");
const multer = require("multer");
const FormData = require("form-data");
const fs = require("fs");

// Load environment variables
dotenv.config();

const app = express();
const port = process.env.PORT || 3000;
const upload = multer({ dest: "uploads/" });

// Middleware
app.use(cors());
app.use(express.json());

// Addis AI API config
const ADDIS_AI_API_KEY = process.env.ADDIS_AI_API_KEY;
const ADDIS_AI_BASE_URL = "https://api.addisassistant.com/api/v1";

// Rate limiting middleware (simple example)
const requestCounts = {};
const RATE_LIMIT = 100; // requests per hour
const RATE_WINDOW = 60 * 60 * 1000; // 1 hour in milliseconds

function rateLimiter(req, res, next) {
  const ip = req.ip;
  const now = Date.now();

  if (!requestCounts[ip]) {
    requestCounts[ip] = { count: 1, resetTime: now + RATE_WINDOW };
  } else if (requestCounts[ip].resetTime < now) {
    // Reset if window expired
    requestCounts[ip] = { count: 1, resetTime: now + RATE_WINDOW };
  } else if (requestCounts[ip].count >= RATE_LIMIT) {
    return res
      .status(429)
      .json({ error: "Rate limit exceeded. Try again later." });
  } else {
    requestCounts[ip].count++;
  }

  next();
}

// Chat generate endpoint
app.post("/api/chat", rateLimiter, async (req, res) => {
  try {
    const { prompt, target_language, conversation_history, generation_config } =
      req.body;

    // Validate required fields
    if (!prompt) {
      return res.status(400).json({ error: "Prompt is required" });
    }

    // Forward request to Addis AI
    const response = await axios.post(
      `${ADDIS_AI_BASE_URL}/chat_generate`,
      {
        prompt,
        target_language: target_language || "am",
        conversation_history: conversation_history || [],
        generation_config: generation_config || { temperature: 0.7 },
      },
      {
        headers: {
          "Content-Type": "application/json",
          "X-API-Key": ADDIS_AI_API_KEY,
        },
      },
    );

    res.json(response.data);
  } catch (error) {
    console.error(
      "Error calling Addis AI:",
      error.response?.data || error.message,
    );

    if (error.response) {
      // Forward Addis AI error status and message
      res.status(error.response.status).json({
        error: "Error from Addis AI API",
        details: error.response.data,
      });
    } else {
      res.status(500).json({ error: "Internal server error" });
    }
  }
});

// Text-to-speech endpoint
app.post("/api/tts", rateLimiter, async (req, res) => {
  try {
    const { text, language } = req.body;

    // Validate required fields
    if (!text) {
      return res.status(400).json({ error: "Text is required" });
    }

    // Forward request to Addis AI
    const response = await axios.post(
      `${ADDIS_AI_BASE_URL}/audio`,
      {
        text,
        language: language || "am",
      },
      {
        headers: {
          "Content-Type": "application/json",
          "X-API-Key": ADDIS_AI_API_KEY,
        },
      },
    );

    res.json(response.data);
  } catch (error) {
    console.error(
      "Error calling Addis AI TTS:",
      error.response?.data || error.message,
    );

    res.status(error.response?.status || 500).json({
      error: "Error from Addis AI API",
      details: error.response?.data || error.message,
    });
  }
});

// Multi-modal chat with image
app.post(
  "/api/chat-with-image",
  rateLimiter,
  upload.single("image"),
  async (req, res) => {
    try {
      const { prompt, target_language } = req.body;
      const imageFile = req.file;

      // Validate required fields
      if (!prompt) {
        return res.status(400).json({ error: "Prompt is required" });
      }
      if (!imageFile) {
        return res.status(400).json({ error: "Image file is required" });
      }

      // Create form data
      const formData = new FormData();
      formData.append("image1", fs.createReadStream(imageFile.path));
      formData.append(
        "request_data",
        JSON.stringify({
          prompt,
          target_language: target_language || "am",
          attachment_field_names: ["image1"],
        }),
      );

      // Forward request to Addis AI
      const response = await axios.post(
        `${ADDIS_AI_BASE_URL}/chat_generate`,
        formData,
        {
          headers: {
            // multipart/form-data headers including the form boundary
            ...formData.getHeaders(),
            "X-API-Key": ADDIS_AI_API_KEY,
          },
        },
      );

      // Clean up uploaded file
      fs.unlinkSync(imageFile.path);

      res.json(response.data);
    } catch (error) {
      console.error(
        "Error calling Addis AI with image:",
        error.response?.data || error.message,
      );

      res.status(error.response?.status || 500).json({
        error: "Error from Addis AI API",
        details: error.response?.data || error.message,
      });
    }
  },
);

// Start server
app.listen(port, () => {
  console.log(`Server running on port ${port}`);
});
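With the proxy in place, the browser only ever talks to your own server and never sees the Addis AI key. A minimal client-side sketch, assuming the Express app above is served from the same origin so the relative /api/chat path resolves to it:

// Minimal browser-side sketch: call your proxy, never the Addis AI API directly.
// Assumes the Express proxy above is reachable at the same origin under /api/chat.
async function askAddis(prompt) {
  const response = await fetch("/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt, target_language: "am" }),
  });

  if (!response.ok) {
    throw new Error(`Proxy returned ${response.status}`);
  }

  // Shape depends on the Addis AI chat_generate response your server forwards
  return response.json();
}

// Usage:
// askAddis("ሰላም").then(console.log).catch(console.error);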
app.post("/api/chat-stream", rateLimiter, (req, res) => {try {const { prompt, target_language, conversation_history } = req.body;// Validate required fieldsif (!prompt) {return res.status(400).json({ error: "Prompt is required" });}// Set up response headers for streamingres.setHeader("Content-Type", "text/event-stream");res.setHeader("Cache-Control", "no-cache");res.setHeader("Connection", "keep-alive");// Create request optionsconst requestData = {prompt,target_language: target_language || "am",conversation_history: conversation_history || [],generation_config: {temperature: 0.7,stream: true,},};// Make a request to Addis AIconst httpsRequest = https.request({hostname: "api.addisassistant.com",path: "/chat_generate",method: "POST",headers: {"Content-Type": "application/json","X-API-Key": ADDIS_AI_API_KEY,},},(response) => {// Handle errors in the responseif (response.statusCode !== 200) {res.write(`data: ${JSON.stringify({error: `API responded with status ${response.statusCode}`,})}\n\n`,);res.end();return;}// Forward chunks to the clientresponse.on("data", (chunk) => {res.write(`data: ${chunk}\n\n`);});// End when Addis AI response endsresponse.on("end", () => {res.write("data: [DONE]\n\n");res.end();});},);// Handle request errorshttpsRequest.on("error", (err) => {console.error("Error in Addis AI streaming request:", err);res.write(`data: ${JSON.stringify({ error: "Internal server error" })}\n\n`,);res.end();});// Send the requesthttpsRequest.write(JSON.stringify(requestData));httpsRequest.end();} catch (error) {console.error("Error setting up Addis AI stream:", error);res.status(500).json({ error: "Internal server error" });}});
pip install fastapi uvicorn python-dotenv python-multipart requests
Create a .env file:
ADDIS_AI_API_KEY=your_api_key_here
Then create your main.py file:
import os
import json
import requests
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Depends, File, UploadFile, Form
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List, Dict, Any

# Load environment variables
load_dotenv()

app = FastAPI(title="Addis AI Proxy API")

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Restrict in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Addis AI API configuration
ADDIS_AI_API_KEY = os.getenv("ADDIS_AI_API_KEY")
ADDIS_AI_BASE_URL = "https://api.addisassistant.com/api/v1"

# Models
class Message(BaseModel):
    role: str
    content: str

class GenerationConfig(BaseModel):
    temperature: float = 0.7
    stream: bool = False

class ChatRequest(BaseModel):
    prompt: str
    target_language: str = "am"
    conversation_history: Optional[List[Message]] = None
    generation_config: Optional[GenerationConfig] = None

class TTSRequest(BaseModel):
    text: str
    language: str = "am"

# Dependency for API key validation
def verify_api_key():
    if not ADDIS_AI_API_KEY:
        raise HTTPException(status_code=500, detail="API key not configured on server")
    return ADDIS_AI_API_KEY

# Routes
@app.post("/api/chat")
async def chat_generate(request: ChatRequest, api_key: str = Depends(verify_api_key)):
    try:
        # Forward request to Addis AI
        response = requests.post(
            f"{ADDIS_AI_BASE_URL}/chat_generate",
            json=request.dict(),
            headers={
                "Content-Type": "application/json",
                "X-API-Key": api_key,
            })

        # Handle errors
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        status_code = e.response.status_code if e.response is not None else 500
        detail = e.response.json() if e.response is not None else str(e)
        raise HTTPException(status_code=status_code, detail=detail)

@app.post("/api/tts")
async def text_to_speech(request: TTSRequest, api_key: str = Depends(verify_api_key)):
    try:
        # Forward request to Addis AI
        response = requests.post(
            f"{ADDIS_AI_BASE_URL}/audio",
            json=request.dict(),
            headers={
                "Content-Type": "application/json",
                "X-API-Key": api_key,
            })

        # Handle errors
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        status_code = e.response.status_code if e.response is not None else 500
        detail = e.response.json() if e.response is not None else str(e)
        raise HTTPException(status_code=status_code, detail=detail)

@app.post("/api/chat-with-image")
async def chat_with_image(
    prompt: str = Form(...),
    target_language: str = Form("am"),
    image: UploadFile = File(...),
    api_key: str = Depends(verify_api_key)
):
    try:
        # Prepare multipart form data
        files = {"image1": (image.filename, await image.read(), image.content_type)}
        data = {
            "request_data": json.dumps({
                "prompt": prompt,
                "target_language": target_language,
                "attachment_field_names": ["image1"]
            })
        }

        # Forward request to Addis AI
        # (let requests set the multipart Content-Type with its boundary)
        response = requests.post(
            f"{ADDIS_AI_BASE_URL}/chat_generate",
            files=files,
            data=data,
            headers={"X-API-Key": api_key})

        # Handle errors
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        status_code = e.response.status_code if e.response is not None else 500
        detail = e.response.json() if e.response is not None else str(e)
        raise HTTPException(status_code=status_code, detail=detail)

# Run the app
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
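The image endpoint expects multipart form data rather than JSON. A browser-side sketch of calling it, which should work against either the Express or FastAPI proxy above since both expose a /api/chat-with-image route with the same field names (prompt, target_language, image):

// Sketch: upload an image plus a prompt to the proxy's /api/chat-with-image route.
// Assumes `fileInput` is an <input type="file"> element.
async function askAboutImage(prompt, fileInput) {
  const formData = new FormData();
  formData.append("prompt", prompt);
  formData.append("target_language", "am");
  formData.append("image", fileInput.files[0]);

  const response = await fetch("/api/chat-with-image", {
    method: "POST",
    body: formData, // the browser sets the multipart boundary automatically
  });

  if (!response.ok) {
    throw new Error(`Proxy returned ${response.status}`);
  }
  return response.json();
}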
from fastapi.responses import StreamingResponse

@app.post("/api/chat-stream")
async def chat_stream(request: ChatRequest, api_key: str = Depends(verify_api_key)):
    # Modify request to enable streaming
    request_data = request.dict()
    if "generation_config" not in request_data or request_data["generation_config"] is None:
        request_data["generation_config"] = {"temperature": 0.7, "stream": True}
    else:
        request_data["generation_config"]["stream"] = True

    # Plain (sync) generator; StreamingResponse runs it in a threadpool,
    # so the blocking requests call does not stall the event loop.
    def stream_response():
        try:
            # Set up streaming request to Addis AI
            with requests.post(
                f"{ADDIS_AI_BASE_URL}/chat_generate",
                json=request_data,
                headers={
                    "Content-Type": "application/json",
                    "X-API-Key": api_key,
                },
                stream=True
            ) as response:
                # Handle errors
                if response.status_code != 200:
                    yield f"data: {json.dumps({'error': f'API responded with status {response.status_code}'})}\n\n"
                    return

                # Stream each chunk back to the client
                for line in response.iter_lines():
                    if line:
                        yield f"data: {line.decode('utf-8')}\n\n"

                yield "data: [DONE]\n\n"
        except Exception as e:
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    return StreamingResponse(stream_response(), media_type="text/event-stream")
Create an index.php file:
<?php
// Addis AI API configuration
$addisAiApiKey = getenv('ADDIS_AI_API_KEY');
$addisAiBaseUrl = 'https://api.addisassistant.com/api/v1';

// Set headers for JSON responses
header('Content-Type: application/json');
header('Access-Control-Allow-Origin: *'); // Restrict in production
header('Access-Control-Allow-Methods: GET, POST, OPTIONS');
header('Access-Control-Allow-Headers: Content-Type, Authorization');

// Handle OPTIONS request for CORS preflight
if ($_SERVER['REQUEST_METHOD'] === 'OPTIONS') {
    exit(0);
}

// Verify API key is configured
if (!$addisAiApiKey) {
    http_response_code(500);
    echo json_encode(['error' => 'API key not configured on server']);
    exit;
}

// Parse request URI to determine endpoint
$uri = parse_url($_SERVER['REQUEST_URI'], PHP_URL_PATH);
$endpoint = null;

// Check the more specific route first so /api/chat does not shadow it
if (strpos($uri, '/api/chat-with-image') === 0) {
    $endpoint = 'chat-with-image';
} elseif (strpos($uri, '/api/chat') === 0) {
    $endpoint = 'chat';
} elseif (strpos($uri, '/api/tts') === 0) {
    $endpoint = 'tts';
} else {
    http_response_code(404);
    echo json_encode(['error' => 'Endpoint not found']);
    exit;
}

// Get request data
$requestData = json_decode(file_get_contents('php://input'), true);

// Handle different endpoints
switch ($endpoint) {
    case 'chat':
        handleChatRequest($requestData, $addisAiApiKey, $addisAiBaseUrl);
        break;
    case 'tts':
        handleTtsRequest($requestData, $addisAiApiKey, $addisAiBaseUrl);
        break;
    case 'chat-with-image':
        handleChatWithImageRequest($_FILES, $_POST, $addisAiApiKey, $addisAiBaseUrl);
        break;
}

// Function to handle chat generation requests
function handleChatRequest($requestData, $apiKey, $baseUrl) {
    // Validate request
    if (!isset($requestData['prompt']) || empty($requestData['prompt'])) {
        http_response_code(400);
        echo json_encode(['error' => 'Prompt is required']);
        exit;
    }

    // Set default values if not provided
    if (!isset($requestData['target_language'])) {
        $requestData['target_language'] = 'am';
    }
    if (!isset($requestData['conversation_history'])) {
        $requestData['conversation_history'] = [];
    }
    if (!isset($requestData['generation_config'])) {
        $requestData['generation_config'] = ['temperature' => 0.7];
    }

    // Initialize cURL session
    $ch = curl_init($baseUrl . '/chat_generate');

    // Set cURL options
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
    curl_setopt($ch, CURLOPT_POST, true);
    curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($requestData));
    curl_setopt($ch, CURLOPT_HTTPHEADER, [
        'Content-Type: application/json',
        'X-API-Key: ' . $apiKey
    ]);

    // Execute cURL request
    $response = curl_exec($ch);
    $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);

    // Check for errors
    if (curl_errno($ch)) {
        http_response_code(500);
        echo json_encode(['error' => 'Error connecting to Addis AI: ' . curl_error($ch)]);
        curl_close($ch);
        exit;
    }

    // Forward Addis AI response
    http_response_code($httpCode);
    echo $response;

    // Close cURL session
    curl_close($ch);
}

// Function to handle text-to-speech requests
function handleTtsRequest($requestData, $apiKey, $baseUrl) {
    // Validate request
    if (!isset($requestData['text']) || empty($requestData['text'])) {
        http_response_code(400);
        echo json_encode(['error' => 'Text is required']);
        exit;
    }

    // Set default language if not provided
    if (!isset($requestData['language'])) {
        $requestData['language'] = 'am';
    }

    // Initialize cURL session
    $ch = curl_init($baseUrl . '/audio');

    // Set cURL options
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
    curl_setopt($ch, CURLOPT_POST, true);
    curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($requestData));
    curl_setopt($ch, CURLOPT_HTTPHEADER, [
        'Content-Type: application/json',
        'X-API-Key: ' . $apiKey
    ]);

    // Execute cURL request
    $response = curl_exec($ch);
    $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);

    // Check for errors
    if (curl_errno($ch)) {
        http_response_code(500);
        echo json_encode(['error' => 'Error connecting to Addis AI: ' . curl_error($ch)]);
        curl_close($ch);
        exit;
    }

    // Forward Addis AI response
    http_response_code($httpCode);
    echo $response;

    // Close cURL session
    curl_close($ch);
}

// Function to handle chat with image requests
function handleChatWithImageRequest($files, $post, $apiKey, $baseUrl) {
    // Validate request
    if (!isset($post['prompt']) || empty($post['prompt'])) {
        http_response_code(400);
        echo json_encode(['error' => 'Prompt is required']);
        exit;
    }
    if (!isset($files['image']) || $files['image']['error'] !== UPLOAD_ERR_OK) {
        http_response_code(400);
        echo json_encode(['error' => 'Image file is required']);
        exit;
    }

    // Set default language if not provided
    $targetLanguage = isset($post['target_language']) ? $post['target_language'] : 'am';

    // Prepare request data
    $requestData = json_encode([
        'prompt' => $post['prompt'],
        'target_language' => $targetLanguage,
        'attachment_field_names' => ['image1']
    ]);

    // Initialize cURL session
    $ch = curl_init($baseUrl . '/chat_generate');

    // Create a cURL file
    $cFile = curl_file_create(
        $files['image']['tmp_name'],
        $files['image']['type'],
        $files['image']['name']
    );

    // Prepare multipart form data
    $postFields = [
        'image1' => $cFile,
        'request_data' => $requestData
    ];

    // Set cURL options
    // (cURL sets the multipart/form-data Content-Type automatically for array POSTFIELDS)
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
    curl_setopt($ch, CURLOPT_POST, true);
    curl_setopt($ch, CURLOPT_POSTFIELDS, $postFields);
    curl_setopt($ch, CURLOPT_HTTPHEADER, [
        'X-API-Key: ' . $apiKey
    ]);

    // Execute cURL request
    $response = curl_exec($ch);
    $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);

    // Check for errors
    if (curl_errno($ch)) {
        http_response_code(500);
        echo json_encode(['error' => 'Error connecting to Addis AI: ' . curl_error($ch)]);
        curl_close($ch);
        exit;
    }

    // Forward Addis AI response
    http_response_code($httpCode);
    echo $response;

    // Close cURL session
    curl_close($ch);
}
?>
Add an .htaccess file for URL rewriting:
RewriteEngine On
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule ^(.*)$ index.php [QSA,L]
// Example using Node.js with a simple in-memory cache
const cache = new Map();
const CACHE_TTL = 3600 * 1000; // 1 hour in milliseconds

app.post("/api/tts", rateLimiter, async (req, res) => {
  try {
    const { text, language } = req.body;

    // Create cache key from request parameters
    const cacheKey = `tts:${language}:${text}`;

    // Check cache
    if (cache.has(cacheKey)) {
      const cachedData = cache.get(cacheKey);
      if (cachedData.expiry > Date.now()) {
        return res.json(cachedData.data);
      }
      // Remove expired cache entry
      cache.delete(cacheKey);
    }

    // Forward request to Addis AI
    const response = await axios.post(
      `${ADDIS_AI_BASE_URL}/audio`,
      { text, language: language || "am" },
      {
        headers: {
          "Content-Type": "application/json",
          "X-API-Key": ADDIS_AI_API_KEY,
        },
      },
    );

    // Cache the response
    cache.set(cacheKey, {
      data: response.data,
      expiry: Date.now() + CACHE_TTL,
    });

    res.json(response.data);
  } catch (error) {
    // Error handling...
  }
});
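Note that this in-memory cache grows without bound as new text/language pairs arrive. One possible remedy, sketched below as an assumption rather than part of the example above, is a periodic sweep that evicts expired entries:

// Optional sketch: periodically drop expired entries so the in-memory cache
// does not grow without bound. The 10-minute interval is an arbitrary choice.
setInterval(() => {
  const now = Date.now();
  for (const [key, entry] of cache) {
    if (entry.expiry <= now) {
      cache.delete(key);
    }
  }
}, 10 * 60 * 1000);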
// Example using the agentkeepalive package with Node.js
const { HttpsAgent } = require("agentkeepalive");

const keepaliveAgent = new HttpsAgent({
  maxSockets: 100,
  maxFreeSockets: 10,
  timeout: 60000,
  freeSocketTimeout: 30000,
});

// Use the agent with axios
const axiosInstance = axios.create({
  httpsAgent: keepaliveAgent,
});
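To actually benefit from the pooled connections, the proxy handlers would call axiosInstance instead of the global axios. A sketch of the outbound TTS call rewritten to use it (callAddisTts is an illustrative helper name, not part of the Addis AI API):

// Sketch: route outbound Addis AI calls through axiosInstance so they reuse
// the pooled keep-alive connections. Relies on the constants defined earlier.
async function callAddisTts(text, language) {
  const response = await axiosInstance.post(
    `${ADDIS_AI_BASE_URL}/audio`,
    { text, language: language || "am" },
    {
      headers: {
        "Content-Type": "application/json",
        "X-API-Key": ADDIS_AI_API_KEY,
      },
    },
  );
  return response.data;
}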
FROM node:16-alpine

WORKDIR /app

COPY package*.json ./
RUN npm install --production

COPY . .

ENV PORT=3000
EXPOSE 3000

CMD ["node", "app.js"]
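Building and running the container then looks roughly like this (addis-proxy is just an illustrative image tag; the API key is injected at runtime rather than baked into the image):

docker build -t addis-proxy .
docker run -p 3000:3000 -e ADDIS_AI_API_KEY=your_api_key_here addis-proxy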