Skip to content

Commit 5a67d59

Browse files
committed
add ai explain
1 parent bfe6f69 commit 5a67d59

4 files changed

Lines changed: 438 additions & 64 deletions

File tree

backend/app/main.py

Lines changed: 120 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -2,79 +2,156 @@
22
from flask_cors import CORS
33
from services.topic_service import TopicService
44
from services.ai_service import AITopicProcessor
5+
import os
6+
import asyncio
57

68
# Flask application with permissive CORS so the separately-served frontend
# can call the API from any origin during development.
app = Flask(__name__)
CORS(
    app,
    resources={
        r"/*": {
            # NOTE(review): "*" origins is development-friendly but should be
            # restricted before any production deployment.
            "origins": "*",
            "methods": ["GET", "POST", "OPTIONS"],
            "allow_headers": ["Content-Type", "Authorization"],
        }
    },
)

# Shared service singletons used by the route handlers below.
topic_service = TopicService()
ai_processor = AITopicProcessor()
1722

@app.route("/api/process-topics", methods=["GET", "POST"])
def process_topics():
    """Process a topic search term via TopicService and return JSON.

    The search term arrives as JSON body key ``searchTerm`` (POST) or as a
    query parameter of the same name (GET).  Returns the service result as
    JSON, or a structured 500 error payload on failure.
    """
    try:
        if request.method == "POST":
            # silent=True + fallback: a missing or non-JSON body yields {}
            # instead of raising AttributeError on None.get(...).
            data = request.get_json(silent=True) or {}
            search_term = data.get("searchTerm", "")
        else:
            search_term = request.args.get("searchTerm", "")

        result = topic_service.process_topics(search_term)
        return jsonify(result)

    except Exception as e:
        # Route-boundary catch-all: report failures as a structured payload.
        return jsonify(
            {
                "success": False,
                "error": str(e),
                "message": "An error occurred while processing the request",
            }
        ), 500

@app.route("/api/ai-process", methods=["GET", "POST"])
def ai_process():
    """Run the configured AI model over the user's selected topics.

    Expects a JSON POST body with ``selectedModel``, ``apiKey``,
    ``customPrompt`` and ``selectedTopics`` (the frontend's field names).
    Returns ``{"success": True, "result": ...}`` or a structured error.
    """
    try:
        if request.method != "POST":
            # GET is advertised in `methods` but carries no body to process;
            # previously the view fell through and returned None (a crash).
            return jsonify(
                {"success": False, "message": "Use POST with a JSON body"}
            ), 405

        data = request.get_json(silent=True) or {}

        # Extract parameters using the frontend's field names.
        model = data.get("selectedModel", "gpt-3.5-turbo")
        api_token = data.get("apiKey", "")
        prompt = data.get("customPrompt", "")
        selected_topics = data.get("selectedTopics", [])

        # NOTE(review): AITopicProcessor.process_topics appears to be declared
        # `async def` in services/ai_service.py — confirm this call does not
        # need to be awaited / run on an event loop (compare explain_topic).
        ai_result = ai_processor.process_topics(
            model=model,
            api_token=api_token,
            prompt=prompt,
            selected_topics=selected_topics,
        )

        return jsonify({"success": True, "result": ai_result})

    except Exception as e:
        # Fix: previously returned the placeholder list ["error1","error2",
        # "error3"]; use the same structured error shape as the other routes.
        return jsonify(
            {
                "success": False,
                "error": str(e),
                "message": "An error occurred during AI processing",
            }
        ), 500
78+
@app.route("/api/explain-topic", methods=["POST"])
def explain_topic():
    """Generate a short AI explanation for a single topic.

    JSON body: ``topic`` (normalized name), ``searchTerm`` (user's query),
    ``originalTopic`` (raw topic string) and ``apiKey``.  Responds with
    ``{"success": True, "explanation": ...}`` or a structured error.
    """
    try:
        data = request.get_json(silent=True) or {}

        topic = data.get("topic", "")
        search_term = data.get("searchTerm", "")
        original_topic = data.get("originalTopic", "")
        api_key = data.get("apiKey", "")

        if not topic or not search_term or not original_topic:
            return jsonify(
                {
                    "success": False,
                    "message": "Missing required parameters: topic, searchTerm, or originalTopic",
                }
            ), 400

        if not api_key:
            return jsonify(
                {
                    "success": False,
                    "message": "API key is required",
                }
            ), 400

        # Prompt asking the model for a short, structured explanation.
        prompt = f"""Explain '{topic}' in the context of '{search_term}' following this structure:
1. If it's an abbreviation, what it stands for and its common meaning in software development
2. How it's typically defined in technical documentation or Wikipedia
3. Its specific relevance to {search_term} and why developers use this term in repositories
Keep it concise but informative (1-2 sentences)."""

        # Run the async processor on a private event loop; close the loop in
        # `finally` so a failing call cannot leak it.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            # Use Gemini for explanations.
            # NOTE(review): keyword names aligned with the /api/ai-process
            # call (api_token / selected_topics) — this block previously
            # passed api_key= / topics=, which disagrees with the sibling
            # call; confirm against AITopicProcessor.process_topics.
            explanation = loop.run_until_complete(
                ai_processor.process_topics(
                    model="gemini-1.5-flash",
                    api_token=api_key,  # key supplied per-request by the client
                    prompt=prompt,
                    selected_topics=[topic],
                )
            )
        finally:
            loop.close()

        if explanation and len(explanation) > 0:
            return jsonify({"success": True, "explanation": explanation[0]})
        return jsonify(
            {"success": False, "message": "Failed to generate explanation"}
        ), 500

    except Exception as e:
        return jsonify(
            {
                "success": False,
                "error": str(e),
                "message": "An error occurred while generating the explanation",
            }
        ), 500
146+
71147

@app.route("/")
def home():
    """Root endpoint; doubles as a simple liveness check."""
    greeting = "Hello World!"
    return greeting
75151

if __name__ == "__main__":
    print("Starting Flask server...")
    # Allow the port to be overridden via the environment; the default keeps
    # the previous hard-coded behavior (5002).
    port = int(os.environ.get("PORT", 5002))
    print(f"Server running on: http://127.0.0.1:{port}")
    # debug=True is for local development only — disable before deploying.
    app.run(host="127.0.0.1", port=port, debug=True)

backend/app/services/ai_service.py

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,35 @@ class AITopicProcessor:
1212
    def __init__(self):
        # Lazily-initialized API clients; populated by initialize_client().
        self.openai_client = None
        self.gemini_client = None
        # Cached model-name list from genai.list_models(); None until the
        # Gemini branch of initialize_client() runs.
        self.available_models = None
1516

1617
def initialize_client(self, model: str, api_key: str):
1718
if model.startswith("gpt"):
1819
self.openai_client = OpenAI(api_key=api_key)
1920
elif model.startswith("gemini"):
2021
genai.configure(api_key=api_key)
21-
self.gemini_client = genai.GenerativeModel(model)
22+
# List available models
23+
try:
24+
models = genai.list_models()
25+
self.available_models = [m.name for m in models]
26+
logger.debug(f"Available Gemini models: {self.available_models}")
27+
28+
# Try to use the requested model first, then fall back to gemini-1.5-flash
29+
requested_model = f"models/{model}"
30+
if requested_model in self.available_models:
31+
logger.debug(f"Using requested Gemini model: {requested_model}")
32+
self.gemini_client = genai.GenerativeModel(requested_model)
33+
else:
34+
# Fall back to gemini-1.5-flash
35+
fallback_model = "models/gemini-1.5-flash"
36+
if fallback_model in self.available_models:
37+
logger.debug(f"Requested model not found, using fallback: {fallback_model}")
38+
self.gemini_client = genai.GenerativeModel(fallback_model)
39+
else:
40+
raise HTTPException(status_code=500, detail=f"Neither requested model {requested_model} nor fallback model {fallback_model} found")
41+
except Exception as e:
42+
logger.error(f"Error listing Gemini models: {str(e)}")
43+
raise HTTPException(status_code=500, detail=f"Error initializing Gemini: {str(e)}")
2244

2345
async def process_with_openai(
2446
self, prompt: str, topics: List[str], model: str
@@ -49,6 +71,9 @@ async def process_with_openai(
4971

5072
async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]:
5173
try:
74+
if not self.gemini_client:
75+
raise HTTPException(status_code=500, detail="Gemini client not initialized")
76+
5277
full_prompt = f"""Current topics: {", ".join(topics)}
5378
5479
{prompt}
@@ -68,6 +93,7 @@ async def process_with_gemini(self, prompt: str, topics: List[str]) -> List[str]
6893
return []
6994

7095
except Exception as e:
96+
logger.error(f"Gemini API error: {str(e)}")
7197
raise HTTPException(status_code=500, detail=f"Gemini API error: {str(e)}")
7298

7399
async def process_topics(

src/lib/config.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,4 +5,5 @@ export const API_BASE_URL = 'http://localhost:5002';
55
// REST endpoints exposed by the Flask backend (see backend/app/main.py).
export const API_ENDPOINTS = {
  PROCESS_TOPICS: `${API_BASE_URL}/api/process-topics`,
  AI_PROCESS: `${API_BASE_URL}/api/ai-process`,
  EXPLAIN_TOPIC: `${API_BASE_URL}/api/explain-topic`,
} as const;

0 commit comments

Comments
 (0)