Commit 7640c1b

ensure topic list
1 parent ff645f9 commit 7640c1b

3 files changed: 76 additions & 12 deletions


backend/app/main.py
Lines changed: 21 additions & 2 deletions

@@ -4,6 +4,7 @@
 from services.ai_service import AITopicProcessor
 import os
 import asyncio
+import re

 app = Flask(__name__)
 CORS(

@@ -70,8 +71,26 @@ def ai_process():
         )
         loop.close()
         # print(f"AI processing complete. Result length: {len(str(ai_result))}") # Debug log
-        print(ai_result)
-        return jsonify({"success": True, "result": ai_result})
+
+        # Ensure both lists are lowercase or normalized if needed
+        def extract_topic_name(s):
+            # Try to extract the topic name before the first colon or dash, and strip formatting
+            # Handles cases like '**visual-programming-editor:** ...' or 'visual-programming-editor: ...'
+            match = re.match(r'[*]*([a-zA-Z0-9\-]+)[*]*[::]', s.strip())
+            if match:
+                return match.group(1)
+            # Fallback: if the string is just the topic name
+            return s.strip().strip('*')
+
+        intersection = []
+        for ai_item in ai_result:
+            topic_name = extract_topic_name(ai_item)
+            if topic_name in selected_topics:
+                intersection.append(ai_item)
+
+        # print("Selected topics:", selected_topics)
+        # print("Intersection:", intersection)
+        return jsonify({"success": True, "result": intersection})

     except Exception as e:
         # print(f"Error occurred: {str(e)}") # Debug log
backend/app/services/ai_service.py
Lines changed: 3 additions & 3 deletions

@@ -46,8 +46,8 @@ async def process_with_openai(
         self, prompt: str, topics: List[str], model: str, search_term: str
     ) -> List[str]:
         try:
-            full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\nPlease provide suggestions as a simple list, one per line. Keep each suggestion concise."""
-
+            full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\nYou only return a list of the k topics. K can be any number."""
+            # print(full_prompt)
             response = self.openai_client.chat.completions.create(
                 model=model,
                 messages=[{"role": "user", "content": full_prompt}],

@@ -70,7 +70,7 @@ async def process_with_gemini(self, prompt: str, topics: List[str], search_term:
         if not self.gemini_client:
             raise HTTPException(status_code=500, detail="Gemini client not initialized")

-        full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\n Please provide suggestions as a simple list, one per line. Keep each suggestion concise."""
+        full_prompt = f"""Search term: {search_term}\nCurrent topics: {', '.join(topics)}\n\n{prompt}\n\n You only return a list of the k topics. K can be any number. No other text."""

         response = self.gemini_client.generate_content(full_prompt)
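The revised prompts ask the model to return nothing but a bare list of topics. A minimal sketch of the parsing this implies; the splitting step is not part of this diff and is an assumption about the unchanged remainder of process_with_openai:

# Assumption (not shown in this diff): a bare-list completion is split into one
# suggestion per line, producing the List[str] that ai_process later filters.
raw_text = "visual-programming-editor\ndataflow\nnode-graph\n"
suggestions = [line.strip() for line in raw_text.splitlines() if line.strip()]
print(suggestions)  # ['visual-programming-editor', 'dataflow', 'node-graph']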

src/views/TopicHistogram.tsx
Lines changed: 52 additions & 7 deletions

@@ -202,6 +202,10 @@ const TopicHistogram: FC = () => {
     // Add API key input modal - initialize to false instead of !apiKey
     const [showApiKeyModal, setShowApiKeyModal] = useState(false);

+    // Add state for AI suggestions
+    const [llmSuggestionsState, setLlmSuggestionsState] = useState<string[]>([]);
+    const [isLlmProcessing, setIsLlmProcessing] = useState(false);
+
     // Function to handle topic click
     const handleTopicClick = (topic: string) => {
         if (!apiKey) {

@@ -381,6 +385,49 @@ const TopicHistogram: FC = () => {
         setTopicExplanation("");
     };

+    // Function to handle AI suggestions request
+    const handleRequestSuggestions = async (model: string, prompt: string, apiKey: string, topics: string[]) => {
+        setIsLlmProcessing(true);
+        try {
+            console.log('Requesting AI suggestions with:', { model, prompt, apiKey, topics });
+            const response = await fetch(API_ENDPOINTS.AI_PROCESS, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+                body: JSON.stringify({
+                    selectedModel: model,
+                    customPrompt: prompt,
+                    apiKey: apiKey,
+                    selectedTopics: topics,
+                    searchTerm: searchTerm
+                })
+            });
+
+            if (!response.ok) {
+                throw new Error(`HTTP error! status: ${response.status}`);
+            }
+
+            const data = await response.json();
+            console.log('Received AI suggestions:', data);
+
+            if (data.success && Array.isArray(data.result)) {
+                setLlmSuggestionsState(data.result);
+            } else {
+                throw new Error('Invalid response format from AI service');
+            }
+        } catch (error) {
+            console.error('Error getting AI suggestions:', error);
+            notify({
+                message: "Failed to get AI suggestions. Please try again.",
+                type: "error"
+            });
+            setLlmSuggestionsState([]);
+        } finally {
+            setIsLlmProcessing(false);
+        }
+    };
+
     return (
         <main className="container-fluid py-4" style={{ height: '100vh', overflowY: 'auto' }}>
             {/* API Key Modal */}

@@ -694,13 +741,10 @@ const TopicHistogram: FC = () => {
                 {/* Step 2: Topic Refinement */}
                 {currentStep === 2 && (
                     <TopicRefiner
-                        isLlmProcessing={false}
-                        llmSuggestions={[]}
-                        setLlmSuggestions={() => { }}
-                        onRequestSuggestions={async (model, prompt, apiKey, topics) => {
-                            // TODO: Implement AI suggestions
-                            console.log('Requesting suggestions with:', { model, prompt, apiKey, topics });
-                        }}
+                        isLlmProcessing={isLlmProcessing}
+                        llmSuggestions={llmSuggestionsState}
+                        setLlmSuggestions={setLlmSuggestionsState}
+                        onRequestSuggestions={handleRequestSuggestions}
                         selectedTopics={selectedTopics}
                         selectLlmSuggestion={(suggestion) => {
                             if (!selectedTopics.includes(suggestion)) {

@@ -715,6 +759,7 @@ const TopicHistogram: FC = () => {
                     searchTerm={searchTerm}
                 />
             )}
+            {/* {console.log(onRequestSuggestions)} */}
         </main>
     );
 };
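For reference, the JSON contract handleRequestSuggestions relies on can be exercised directly against the backend. A minimal sketch, assuming a locally running Flask server and the route that API_ENDPOINTS.AI_PROCESS resolves to; the URL, port, model name, and payload values here are placeholders, not taken from the commit:

# Illustrative sketch of the request/response contract assumed by
# handleRequestSuggestions. URL, model id, and payload values are placeholders.
import requests

payload = {
    "selectedModel": "gpt-4o-mini",                      # placeholder model id
    "customPrompt": "Suggest topics related to the search term.",
    "apiKey": "sk-...",                                   # user-supplied key
    "selectedTopics": ["visual-programming-editor", "dataflow"],
    "searchTerm": "node-based editors",
}

resp = requests.post("http://localhost:5000/ai-process", json=payload)
data = resp.json()
# Expected on success: {"success": True, "result": [...]}, where "result" is the
# intersection computed in ai_process. The frontend stores data.result in
# llmSuggestionsState only when data.success is true and data.result is an array.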
