@@ -43,14 +43,10 @@ def initialize_client(self, model: str, api_key: str):
             raise HTTPException(status_code=500, detail=f"Error initializing Gemini: {str(e)}")

     async def process_with_openai(
-        self, prompt: str, topics: List[str], model: str
+        self, prompt: str, topics: List[str], model: str, search_term: str
     ) -> List[str]:
         try:
-            full_prompt = f"""Current topics: {", ".join(topics)}
-
-{prompt}
-
-Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""
+            full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\nPlease provide suggestions as a simple list, one per line. Keep each suggestion concise."""

             response = self.openai_client.chat.completions.create(
                 model=model,
@@ -69,16 +65,12 @@ async def process_with_openai(
         except Exception as e:
             raise HTTPException(status_code=500, detail=f"OpenAI API error: {str(e)}")

-    async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]:
+    async def process_with_gemini(self, prompt: str, topics: List[str], search_term: str) -> List[str]:
         try:
             if not self.gemini_client:
                 raise HTTPException(status_code=500, detail="Gemini client not initialized")

-            full_prompt = f"""Current topics: {", ".join(topics)}
-
-{prompt}
-
-Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""
+            full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\nPlease provide suggestions as a simple list, one per line. Keep each suggestion concise."""

             response = self.gemini_client.generate_content(full_prompt)

@@ -97,24 +89,26 @@ async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]
             raise HTTPException(status_code=500, detail=f"Gemini API error: {str(e)}")

     async def process_topics(
-        self, model: str, api_key: str, prompt: str, topics: List[str]
+        self, model: str, api_key: str, prompt: str, topics: List[str], search_term: str
     ) -> List[str]:
         """
         Process topics using the specified AI model and return suggestions.
         """
         # Enhanced debug logging
-        logger.debug("\n=== Incoming Request Validation ===")
-        logger.debug(f"Model: {model if model else 'NOT PROVIDED'}")
-        logger.debug(f"API Key: {'[PROVIDED]' if api_key else 'NOT PROVIDED'}")
-        logger.debug(f"Prompt: {prompt if prompt else 'NOT PROVIDED'}")
-        logger.debug(f"Topics: {topics if topics else 'NOT PROVIDED'}")
-        logger.debug(f"Topics length: {len(topics) if topics else 0}")
-        logger.debug("Request data types:")
-        logger.debug(f"- Model type: {type(model)}")
-        logger.debug(f"- API Key type: {type(api_key)}")
-        logger.debug(f"- Prompt type: {type(prompt)}")
-        logger.debug(f"- Topics type: {type(topics)}")
-        logger.debug("=" * 50)
+        # logger.debug("\n=== Incoming Request Validation ===")
+        # logger.debug(f"Model: {model if model else 'NOT PROVIDED'}")
+        # logger.debug(f"API Key: {'[PROVIDED]' if api_key else 'NOT PROVIDED'}")
+        # logger.debug(f"Prompt: {prompt if prompt else 'NOT PROVIDED'}")
+        # logger.debug(f"Topics: {topics if topics else 'NOT PROVIDED'}")
+        # logger.debug(f"Search Term: {search_term if search_term else 'NOT PROVIDED'}")
+        # logger.debug(f"Topics length: {len(topics) if topics else 0}")
+        # logger.debug("Request data types:")
+        # logger.debug(f"- Model type: {type(model)}")
+        # logger.debug(f"- API Key type: {type(api_key)}")
+        # logger.debug(f"- Prompt type: {type(prompt)}")
+        # logger.debug(f"- Topics type: {type(topics)}")
+        # logger.debug(f"- Search Term type: {type(search_term)}")
+        # logger.debug("=" * 50)

         # More detailed input validation
         validation_errors = []
@@ -128,6 +122,8 @@ async def process_topics(
             validation_errors.append("Topics list cannot be empty")
         if not prompt or not isinstance(prompt, str):
             validation_errors.append("Invalid or missing prompt")
+        if not search_term or not isinstance(search_term, str):
+            validation_errors.append("Invalid or missing search term")

         if validation_errors:
             error_message = "; ".join(validation_errors)
@@ -150,9 +146,9 @@ async def process_topics(

         # Process with appropriate model
         if model.startswith("gpt"):
-            return await self.process_with_openai(prompt, topics, model)
+            return await self.process_with_openai(prompt, topics, model, search_term)
         elif model.startswith("gemini"):
-            return await self.process_with_gemini(prompt, topics)
+            return await self.process_with_gemini(prompt, topics, search_term)
         else:
             raise HTTPException(
                 status_code=400, detail=f"Unsupported model: {model}"
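
For reference, here is a standalone sketch of the prompt string the new single-line full_prompt builds. The sample values are made up for illustration; only the f-string itself comes from the diff:

# Reproduces the prompt construction added above, with illustrative sample data.
search_term = "machine learning"
topics = ["neural networks", "transformers"]
prompt = "Suggest related topics."

full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\nPlease provide suggestions as a simple list, one per line. Keep each suggestion concise."""
print(full_prompt)
# Search term: machine learning
# Current topics: neural networks, transformers
#
# Suggest related topics.
#
# Please provide suggestions as a simple list, one per line. Keep each suggestion concise.

Note that switching from a literal multi-line triple-quoted string to \n escapes on one line leaves the rendered prompt shape unchanged; the only new content is the leading "Search term:" line.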