Skip to content

Commit fd6bd27

Browse files
committed
add ai labels and explanation
1 parent 0c75048 commit fd6bd27

6 files changed

Lines changed: 313 additions & 81 deletions

File tree

backend/app/main.py

Lines changed: 12 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -70,31 +70,21 @@ def ai_process():
7070
)
7171
)
7272
loop.close()
73-
# print(f"AI processing complete. Result length: {len(str(ai_result))}") # Debug log
7473

75-
# Ensure both lists are lowercase or normalized if needed
76-
def extract_topic_name(s):
77-
# Try to extract the topic name before the first colon or dash, and strip formatting
78-
# Handles cases like '**visual-programming-editor:** ...' or 'visual-programming-editor: ...'
79-
match = re.match(r'[*]*([a-zA-Z0-9\-]+)[*]*[::]', s.strip())
80-
if match:
81-
return match.group(1)
82-
# Fallback: if the string is just the topic name
83-
return s.strip().strip('*')
84-
85-
intersection = []
86-
for ai_item in ai_result:
87-
topic_name = extract_topic_name(ai_item)
88-
if topic_name in selected_topics:
89-
intersection.append(ai_item)
90-
91-
# print("Selected topics:", selected_topics)
92-
# print("Intersection:", intersection)
93-
return jsonify({"success": True, "result": intersection})
74+
# Return all AI suggestions, not just the intersection
75+
return jsonify({
76+
"success": True,
77+
"result": ai_result # Return all suggestions with their explanations
78+
})
9479

9580
except Exception as e:
96-
# print(f"Error occurred: {str(e)}") # Debug log
97-
return jsonify(["error1", "error2", "error3"]), 500
81+
return jsonify(
82+
{
83+
"success": False,
84+
"error": str(e),
85+
"message": "An error occurred while processing the request",
86+
}
87+
), 500
9888

9989

10090
@app.route("/api/explain-topic", methods=["POST"])

backend/app/services/ai_service.py

Lines changed: 112 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -3,6 +3,7 @@
33
import google.generativeai as genai
44
from openai import OpenAI
55
from fastapi import HTTPException
6+
import re
67

78
# Configure logging
89
logging.basicConfig(level=logging.DEBUG)
@@ -44,10 +45,26 @@ def initialize_client(self, model: str, api_key: str):
4445

4546
async def process_with_openai(
4647
self, prompt: str, topics: List[str], model: str, search_term: str
47-
) -> List[str]:
48+
) -> List[dict]:
4849
try:
49-
full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\You only return a list of the k topics. K can be any number."""
50-
# print(full_prompt)
50+
full_prompt = f"""Search term: {search_term}
51+
Current topics: {', '.join(topics)}
52+
53+
{prompt}
54+
55+
IMPORTANT FORMATTING RULES:
56+
1. Do NOT use any markdown formatting (no asterisks, no bullet points, no bold/italic)
57+
2. Do NOT use numbers or bullet points
58+
3. Each suggestion must be on a new line
59+
4. Each line must be in the format: "topic: explanation"
60+
5. Keep explanations to one sentence
61+
6. Do not add any extra text or formatting
62+
63+
Example format:
64+
visual-programming-language: A programming language that uses visual elements instead of text
65+
flow-based-programming: A programming paradigm where programs are built by connecting nodes
66+
"""
67+
5168
response = self.openai_client.chat.completions.create(
5269
model=model,
5370
messages=[{"role": "user", "content": full_prompt}],
@@ -56,32 +73,109 @@ async def process_with_openai(
5673
)
5774

5875
suggestions = response.choices[0].message.content.strip().split("\n")
59-
# Clean up suggestions (remove bullet points, numbers, etc.)
60-
suggestions = [
61-
s.lstrip("- ").lstrip("* ").lstrip("1234567890. ") for s in suggestions
62-
]
63-
return [s for s in suggestions if s] # Remove empty strings
76+
# Process suggestions to extract topic and explanation
77+
processed_suggestions = []
78+
for s in suggestions:
79+
# Clean up markdown formatting and numbers
80+
s = s.strip()
81+
# Remove numbers and dots at start (e.g., "1. ", "2. ")
82+
s = re.sub(r'^\d+\.\s*', '', s)
83+
# Remove bullet points and asterisks
84+
s = re.sub(r'^[\*\-]\s*', '', s)
85+
# Remove any remaining markdown formatting
86+
s = re.sub(r'\*\*|\*|__|_', '', s)
87+
# Remove any leading/trailing whitespace
88+
s = s.strip()
89+
90+
if not s:
91+
continue
92+
93+
# Split on first colon
94+
parts = s.split(":", 1)
95+
if len(parts) == 2:
96+
topic = parts[0].strip()
97+
explanation = parts[1].strip()
98+
# Remove any remaining markdown from explanation
99+
explanation = re.sub(r'\*\*|\*|__|_', '', explanation)
100+
processed_suggestions.append({
101+
"topic": topic,
102+
"explanation": explanation
103+
})
104+
else:
105+
# If no explanation provided, use the topic as is
106+
processed_suggestions.append({
107+
"topic": s.strip(),
108+
"explanation": f"Suggested as relevant to {search_term}"
109+
})
110+
return processed_suggestions
64111

65112
except Exception as e:
66113
raise HTTPException(status_code=500, detail=f"OpenAI API error: {str(e)}")
67114

68-
async def process_with_gemini(self, prompt: str, topics: List[str], search_term: str) -> List[str]:
115+
async def process_with_gemini(self, prompt: str, topics: List[str], search_term: str) -> List[dict]:
69116
try:
70117
if not self.gemini_client:
71118
raise HTTPException(status_code=500, detail="Gemini client not initialized")
72119

73-
full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\n You only return a list of the k topics. K can be any number. No other text."""
120+
full_prompt = f"""Search term: {search_term}
121+
Current topics: {', '.join(topics)}
122+
123+
{prompt}
124+
125+
IMPORTANT FORMATTING RULES:
126+
1. Do NOT use any markdown formatting (no asterisks, no bullet points, no bold/italic)
127+
2. Do NOT use numbers or bullet points
128+
3. Each suggestion must be on a new line
129+
4. Each line must be in the format: "topic: explanation"
130+
5. Keep explanations to one sentence
131+
6. Do not add any extra text or formatting
132+
133+
Example format:
134+
visual-programming-language: A programming language that uses visual elements instead of text
135+
flow-based-programming: A programming paradigm where programs are built by connecting nodes
136+
"""
74137

75138
response = self.gemini_client.generate_content(full_prompt)
139+
print("Raw response:", response.text) # Debug print
76140

77141
if response.text:
78142
suggestions = response.text.strip().split("\n")
79-
# Clean up suggestions (remove bullet points, numbers, etc.)
80-
suggestions = [
81-
s.lstrip("- ").lstrip("* ").lstrip("1234567890. ")
82-
for s in suggestions
83-
]
84-
return [s for s in suggestions if s] # Remove empty strings
143+
# Process suggestions to extract topic and explanation
144+
processed_suggestions = []
145+
for s in suggestions:
146+
# Clean up markdown formatting and numbers
147+
s = s.strip()
148+
# Remove numbers and dots at start (e.g., "1. ", "2. ")
149+
s = re.sub(r'^\d+\.\s*', '', s)
150+
# Remove bullet points and asterisks
151+
s = re.sub(r'^[\*\-]\s*', '', s)
152+
# Remove any remaining markdown formatting
153+
s = re.sub(r'\*\*|\*|__|_', '', s)
154+
# Remove any leading/trailing whitespace
155+
s = s.strip()
156+
157+
if not s:
158+
continue
159+
160+
# Split on first colon
161+
parts = s.split(":", 1)
162+
if len(parts) == 2:
163+
topic = parts[0].strip()
164+
explanation = parts[1].strip()
165+
# Remove any remaining markdown from explanation
166+
explanation = re.sub(r'\*\*|\*|__|_', '', explanation)
167+
processed_suggestions.append({
168+
"topic": topic,
169+
"explanation": explanation
170+
})
171+
else:
172+
# If no explanation provided, use the topic as is
173+
processed_suggestions.append({
174+
"topic": s.strip(),
175+
"explanation": f"Suggested as relevant to {search_term}"
176+
})
177+
print("Processed suggestions:", processed_suggestions) # Debug print
178+
return processed_suggestions
85179
return []
86180

87181
except Exception as e:
@@ -90,9 +184,9 @@ async def process_with_gemini(self, prompt: str, topics: List[str], search_term:
90184

91185
async def process_topics(
92186
self, model: str, api_key: str, prompt: str, topics: List[str], search_term: str
93-
) -> List[str]:
187+
) -> List[dict]:
94188
"""
95-
Process topics using the specified AI model and return suggestions.
189+
Process topics using the specified AI model and return suggestions with explanations.
96190
"""
97191
# Enhanced debug logging
98192
# logger.debug("\n=== Incoming Request Validation ===")

package-lock.json

Lines changed: 11 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -48,6 +48,7 @@
4848
"@eslint/js": "^9.15.0",
4949
"@playwright/test": "^1.49.0",
5050
"@trivago/prettier-plugin-sort-imports": "^4.3.0",
51+
"@types/bootstrap": "^5.2.10",
5152
"@types/chroma-js": "^2.4.4",
5253
"@types/classnames": "^2.3.4",
5354
"@types/file-saver": "^2.0.7",

0 commit comments

Comments (0)