Skip to content

Commit ff645f9

Browse files
committed
AI suggestion work on backend
1 parent 8c7d577 commit ff645f9

3 files changed

Lines changed: 63 additions & 53 deletions

File tree

backend/app/main.py

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -47,28 +47,30 @@ def ai_process():
4747
try:
4848
if request.method == "POST":
4949
data = request.get_json()
50-
# print(data) # Debug log
50+
# print("Received data from frontend:", data) # Debug print
5151

5252
# Extract parameters using frontend names
5353
model = data.get("selectedModel", "gpt-3.5-turbo")
54-
api_token = data.get("apiKey", "")
54+
api_key = data.get("apiKey", "")
5555
prompt = data.get("customPrompt", "")
5656
selected_topics = data.get("selectedTopics", [])
57-
58-
# print(f"Selected topics: {selected_topics}") # Debug log
59-
# print(f"Using model: {model}") # Debug log
60-
# print(f"Prompt length: {len(prompt)}") # Debug log
57+
search_term = data.get("searchTerm", "")
6158

6259
# Use the AI processor to analyze the topics
63-
# print("About to call AI processor...") # Debug log
64-
ai_result = ai_processor.process_topics(
65-
model=model,
66-
api_token=api_token,
67-
prompt=prompt,
68-
selected_topics=selected_topics,
60+
loop = asyncio.new_event_loop()
61+
asyncio.set_event_loop(loop)
62+
ai_result = loop.run_until_complete(
63+
ai_processor.process_topics(
64+
model=model,
65+
api_key=api_key,
66+
prompt=prompt,
67+
topics=selected_topics,
68+
search_term=search_term
69+
)
6970
)
71+
loop.close()
7072
# print(f"AI processing complete. Result length: {len(str(ai_result))}") # Debug log
71-
73+
print(ai_result)
7274
return jsonify({"success": True, "result": ai_result})
7375

7476
except Exception as e:

backend/app/services/ai_service.py

Lines changed: 23 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -43,14 +43,10 @@ def initialize_client(self, model: str, api_key: str):
4343
raise HTTPException(status_code=500, detail=f"Error initializing Gemini: {str(e)}")
4444

4545
async def process_with_openai(
46-
self, prompt: str, topics: List[str], model: str
46+
self, prompt: str, topics: List[str], model: str, search_term: str
4747
) -> List[str]:
4848
try:
49-
full_prompt = f"""Current topics: {", ".join(topics)}
50-
51-
{prompt}
52-
53-
Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""
49+
full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\nPlease provide suggestions as a simple list, one per line. Keep each suggestion concise."""
5450

5551
response = self.openai_client.chat.completions.create(
5652
model=model,
@@ -69,16 +65,12 @@ async def process_with_openai(
6965
except Exception as e:
7066
raise HTTPException(status_code=500, detail=f"OpenAI API error: {str(e)}")
7167

72-
async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]:
68+
async def process_with_gemini(self, prompt: str, topics: List[str], search_term: str) -> List[str]:
7369
try:
7470
if not self.gemini_client:
7571
raise HTTPException(status_code=500, detail="Gemini client not initialized")
7672

77-
full_prompt = f"""Current topics: {", ".join(topics)}
78-
79-
{prompt}
80-
81-
Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""
73+
full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\n Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""
8274

8375
response = self.gemini_client.generate_content(full_prompt)
8476

@@ -97,24 +89,26 @@ async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]
9789
raise HTTPException(status_code=500, detail=f"Gemini API error: {str(e)}")
9890

9991
async def process_topics(
100-
self, model: str, api_key: str, prompt: str, topics: List[str]
92+
self, model: str, api_key: str, prompt: str, topics: List[str], search_term: str
10193
) -> List[str]:
10294
"""
10395
Process topics using the specified AI model and return suggestions.
10496
"""
10597
# Enhanced debug logging
106-
logger.debug("\n=== Incoming Request Validation ===")
107-
logger.debug(f"Model: {model if model else 'NOT PROVIDED'}")
108-
logger.debug(f"API Key: {'[PROVIDED]' if api_key else 'NOT PROVIDED'}")
109-
logger.debug(f"Prompt: {prompt if prompt else 'NOT PROVIDED'}")
110-
logger.debug(f"Topics: {topics if topics else 'NOT PROVIDED'}")
111-
logger.debug(f"Topics length: {len(topics) if topics else 0}")
112-
logger.debug("Request data types:")
113-
logger.debug(f"- Model type: {type(model)}")
114-
logger.debug(f"- API Key type: {type(api_key)}")
115-
logger.debug(f"- Prompt type: {type(prompt)}")
116-
logger.debug(f"- Topics type: {type(topics)}")
117-
logger.debug("=" * 50)
98+
# logger.debug("\n=== Incoming Request Validation ===")
99+
# logger.debug(f"Model: {model if model else 'NOT PROVIDED'}")
100+
# logger.debug(f"API Key: {'[PROVIDED]' if api_key else 'NOT PROVIDED'}")
101+
# logger.debug(f"Prompt: {prompt if prompt else 'NOT PROVIDED'}")
102+
# logger.debug(f"Topics: {topics if topics else 'NOT PROVIDED'}")
103+
# logger.debug(f"Search Term: {search_term if search_term else 'NOT PROVIDED'}")
104+
# logger.debug(f"Topics length: {len(topics) if topics else 0}")
105+
# logger.debug("Request data types:")
106+
# logger.debug(f"- Model type: {type(model)}")
107+
# logger.debug(f"- API Key type: {type(api_key)}")
108+
# logger.debug(f"- Prompt type: {type(prompt)}")
109+
# logger.debug(f"- Topics type: {type(topics)}")
110+
# logger.debug(f"- Search Term type: {type(search_term)}")
111+
# logger.debug("=" * 50)
118112

119113
# More detailed input validation
120114
validation_errors = []
@@ -128,6 +122,8 @@ async def process_topics(
128122
validation_errors.append("Topics list cannot be empty")
129123
if not prompt or not isinstance(prompt, str):
130124
validation_errors.append("Invalid or missing prompt")
125+
if not search_term or not isinstance(search_term, str):
126+
validation_errors.append("Invalid or missing search term")
131127

132128
if validation_errors:
133129
error_message = "; ".join(validation_errors)
@@ -150,9 +146,9 @@ async def process_topics(
150146

151147
# Process with appropriate model
152148
if model.startswith("gpt"):
153-
return await self.process_with_openai(prompt, topics, model)
149+
return await self.process_with_openai(prompt, topics, model, search_term)
154150
elif model.startswith("gemini"):
155-
return await self.process_with_gemini(prompt, topics)
151+
return await self.process_with_gemini(prompt, topics, search_term)
156152
else:
157153
raise HTTPException(
158154
status_code=400, detail=f"Unsupported model: {model}"

src/components/TopicRefiner.tsx

Lines changed: 25 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -18,21 +18,13 @@ interface AIModel {
1818
}
1919

2020
const AI_MODELS: AIModel[] = [
21-
{
22-
id: 'gpt-4',
23-
name: 'OpenAI GPT-4'
24-
},
2521
{
2622
id: 'gpt-3.5-turbo',
27-
name: 'OpenAI GPT-3.5'
28-
},
29-
{
30-
id: 'gemini-pro',
31-
name: 'Google Gemini Pro'
23+
name: 'OpenAI GPT-3.5 Turbo'
3224
},
3325
{
34-
id: 'gemini-ultra',
35-
name: 'Google Gemini Ultra'
26+
id: 'gemini-2.0-flash',
27+
name: 'Google Gemini 2.0 Flash'
3628
}
3729
];
3830

@@ -68,7 +60,7 @@ export const TopicRefiner: FC<TopicRefinerProps> = ({
6860
const [showPromptModal, setShowPromptModal] = useState(false);
6961
const [showWelcomeModal, setShowWelcomeModal] = useState(true);
7062
const [customPrompt, setCustomPrompt] = useState(
71-
"Please analyze these topics and suggest related or more specific topics that might be relevant."
63+
"Select the top K most relevant topics from the list of {Current topics} based on their relevance to the {Search Term}."
7264
);
7365
const [selectedModel, setSelectedModel] = useState('gpt-4');
7466
const [apiKey, setApiKey] = useState('');
@@ -254,6 +246,26 @@ export const TopicRefiner: FC<TopicRefinerProps> = ({
254246
Customize Topics
255247
</h3>
256248

249+
{/* AI Suggestions (in right column) */}
250+
{llmSuggestions.length > 0 && (
251+
<div className="mb-4">
252+
<h6 className="text-muted mb-2">AI Suggestions (in right column)</h6>
253+
<div className="list-group" style={{ maxHeight: "200px", overflowY: "auto" }}>
254+
{llmSuggestions.filter(suggestion => !selectedTopics.includes(suggestion)).map((suggestion) => (
255+
<div key={suggestion} className="list-group-item d-flex justify-content-between align-items-center">
256+
<span>{suggestion}</span>
257+
<button
258+
className="btn btn-sm btn-outline-primary"
259+
onClick={() => selectLlmSuggestion(suggestion)}
260+
>
261+
<Plus size={16} />
262+
</button>
263+
</div>
264+
))}
265+
</div>
266+
</div>
267+
)}
268+
257269
<div className="mb-4">
258270
<div className="input-group">
259271
<input
@@ -422,7 +434,7 @@ export const TopicRefiner: FC<TopicRefinerProps> = ({
422434
/>
423435
<small className="text-muted">
424436
Customize how the AI should analyze and suggest topics. Be specific about what kind of suggestions
425-
you're looking for.
437+
you're looking for and you should include search term and current topics in the prompt.
426438
</small>
427439
</div>
428440
</div>

0 commit comments

Comments (0)