import logging
import re
from typing import List

import google.generativeai as genai
from fastapi import HTTPException
from openai import AsyncOpenAI

# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


class AITopicProcessor:
    def __init__(self):
        self.openai_client = None
        self.gemini_client = None

    def initialize_client(self, model: str, api_key: str):
        # Pick the backend from the model name prefix; unsupported
        # prefixes are rejected later in process_topics().
        if model.startswith("gpt"):
            # AsyncOpenAI so the async handlers below can await the call
            # instead of blocking the event loop.
            self.openai_client = AsyncOpenAI(api_key=api_key)
        elif model.startswith("gemini"):
            genai.configure(api_key=api_key)
            self.gemini_client = genai.GenerativeModel(model)

    async def process_with_openai(
        self, prompt: str, topics: List[str], model: str
    ) -> List[str]:
        try:
            full_prompt = f"""Current topics: {", ".join(topics)}

{prompt}

Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""

            response = await self.openai_client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": full_prompt}],
                temperature=0.7,
                max_tokens=500,
            )

            suggestions = response.choices[0].message.content.strip().split("\n")
            # Strip a leading bullet ("-", "*") or number ("1.", "2)") from each
            # line. Chained lstrip() calls strip character *sets*, so they would
            # also eat leading digits or dashes that belong to the suggestion.
            suggestions = [
                re.sub(r"^\s*(?:[-*]\s*|\d+[.)]\s*)", "", s) for s in suggestions
            ]
            return [s for s in suggestions if s]  # Remove empty strings

        except Exception as e:
            raise HTTPException(status_code=500, detail=f"OpenAI API error: {str(e)}")

    async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]:
        try:
            full_prompt = f"""Current topics: {", ".join(topics)}

{prompt}

Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""

            # generate_content_async keeps this handler non-blocking.
            response = await self.gemini_client.generate_content_async(full_prompt)

            # Note: accessing .text raises if the response was blocked; the
            # except clause below surfaces that as a 500.
            if response.text:
                suggestions = response.text.strip().split("\n")
                # Same bullet/number cleanup as the OpenAI path.
                suggestions = [
                    re.sub(r"^\s*(?:[-*]\s*|\d+[.)]\s*)", "", s)
                    for s in suggestions
                ]
                return [s for s in suggestions if s]  # Remove empty strings
            return []

        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Gemini API error: {str(e)}")

    async def process_topics(
        self, model: str, api_key: str, prompt: str, topics: List[str]
    ) -> List[str]:
        """
        Process topics using the specified AI model and return suggestions.
        """
        # Enhanced debug logging
        logger.debug("\n=== Incoming Request Validation ===")
        logger.debug(f"Model: {model if model else 'NOT PROVIDED'}")
        logger.debug(f"API Key: {'[PROVIDED]' if api_key else 'NOT PROVIDED'}")
        logger.debug(f"Prompt: {prompt if prompt else 'NOT PROVIDED'}")
        logger.debug(f"Topics: {topics if topics else 'NOT PROVIDED'}")
        logger.debug(f"Topics length: {len(topics) if topics else 0}")
        logger.debug("Request data types:")
        logger.debug(f"- Model type: {type(model)}")
        logger.debug(f"- API Key type: {type(api_key)}")
        logger.debug(f"- Prompt type: {type(prompt)}")
        logger.debug(f"- Topics type: {type(topics)}")
        logger.debug("=" * 50)

        # More detailed input validation
        validation_errors = []
        if not model or not isinstance(model, str):
            validation_errors.append("Invalid or missing model")
        if not api_key or not isinstance(api_key, str):
            validation_errors.append("Invalid or missing API key")
        if not isinstance(topics, list):
            validation_errors.append("Topics must be a list")
        elif len(topics) == 0:
            validation_errors.append("Topics list cannot be empty")
        if not prompt or not isinstance(prompt, str):
            validation_errors.append("Invalid or missing prompt")

        if validation_errors:
            error_message = "; ".join(validation_errors)
            logger.error(f"Validation failed: {error_message}")
            raise HTTPException(status_code=400, detail=error_message)

        try:
            # Initialize the appropriate client
            self.initialize_client(model, api_key)

            # Validate client initialization
            if model.startswith("gpt") and not self.openai_client:
                raise HTTPException(
                    status_code=500, detail="Failed to initialize OpenAI client"
                )
            if model.startswith("gemini") and not self.gemini_client:
                raise HTTPException(
                    status_code=500, detail="Failed to initialize Gemini client"
                )

            # Process with appropriate model
            if model.startswith("gpt"):
                return await self.process_with_openai(prompt, topics, model)
            elif model.startswith("gemini"):
                return await self.process_with_gemini(prompt, topics)
            else:
                raise HTTPException(
                    status_code=400, detail=f"Unsupported model: {model}"
                )
        except HTTPException:
            # Re-raise as-is so the 400 for an unsupported model (and any
            # status set by the helpers above) is not rewrapped as a 500.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))
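
# --- Hypothetical usage sketch (assumption, not part of this diff) ---
# A minimal example of how AITopicProcessor might be wired into a FastAPI
# endpoint. The request model, its field names, and the route path are
# illustrative guesses; the actual application code is not shown in this PR.
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()
processor = AITopicProcessor()

class TopicRequest(BaseModel):
    model: str
    api_key: str
    prompt: str
    topics: List[str]

@app.post("/topics/suggest")
async def suggest_topics(request: TopicRequest) -> List[str]:
    # Validation and model dispatch live in the processor: validation
    # failures surface as 400s, backend errors as 500s.
    return await processor.process_topics(
        request.model, request.api_key, request.prompt, request.topics
    )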