Skip to content

Commit 45020eb

Browse files
authored
Merge pull request #12 from data-exp-lab/ui
Topic Filtering Implementation
2 parents 05f6e9d + fd6bd27 commit 45020eb

35 files changed

Lines changed: 7464 additions & 238 deletions

.gitignore

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,14 @@
1+
# Development
2+
.next
13
node_modules
2-
*.ipython_checkpoints
4+
.env.local
5+
.env.development.local
6+
.env.test.local
7+
.env.production.local
8+
.ipynb_checkpoints/
39
dist
410
.DS_Store
5-
*temp
11+
*temp
12+
*repo_metadata.json
13+
__pycache__
14+
*.duckdb

README.md

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
<div align="center">
32
<img src="./public/deepgit_logo.png" alt="DeepGit Logo" width="150">
43
<h1 align="center">DeepGit</h1>
@@ -12,8 +11,29 @@ DeepGit is a free, open-source web application designed to help researchers and
1211
<img src="./public/ossci_logo.jpg" width="120" />
1312
</p>
1413

14+
# Development
15+
16+
## Prerequisites
17+
- Node.js (v16 or higher)
18+
- npm (v8 or higher)
19+
20+
## Getting Started
21+
22+
1. Clone the repository:
23+
```bash
24+
git clone https://github.com/data-exp-lab/deepgit.git
25+
cd deepgit
26+
```
1527

28+
2. Install dependencies:
29+
```bash
30+
npm install
31+
```
1632

33+
3. Start the development server:
34+
```bash
35+
npm run dev:watch
36+
```
1737
# Acknowledgment
1838
DeepGit is built upon [Retina](https://ouestware.gitlab.io/retina/1.0.0-beta.4/#/) developed by [OuestWare](https://www.ouestware.com/en/)
1939

backend/app/__init__.py

Whitespace-only changes.

backend/app/main.py

Lines changed: 247 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,247 @@
# Flask application entry point for the DeepGit topic-filtering backend.
import asyncio
import os
import re

from flask import Flask, jsonify, request
from flask_cors import CORS

from services.ai_service import AITopicProcessor
from services.topic_service import TopicService

app = Flask(__name__)

# Allow cross-origin calls from any frontend origin on every route.
CORS(
    app,
    resources={
        r"/*": {
            "origins": "*",
            "methods": ["GET", "POST", "OPTIONS"],
            "allow_headers": ["Content-Type", "Authorization"],
        }
    },
)

# Shared service singletons used by all request handlers below.
topic_service = TopicService()
ai_processor = AITopicProcessor()
@app.route("/api/process-topics", methods=["GET", "POST"])
def process_topics():
    """Return topic-processing results for a search term.

    The term arrives as JSON field ``searchTerm`` on POST, or as the
    ``searchTerm`` query parameter on GET.  Responds with the
    TopicService result as JSON, or a JSON error payload with HTTP 500
    on failure.
    """
    try:
        if request.method == "POST":
            # get_json() returns None for a missing or invalid JSON body,
            # which previously made data.get() raise AttributeError and
            # surface as a generic 500.  silent=True + empty-dict fallback
            # keeps the handler on its normal empty-term path instead.
            data = request.get_json(silent=True) or {}
            search_term = data.get("searchTerm", "")
        else:
            search_term = request.args.get("searchTerm", "")
        result = topic_service.process_topics(search_term)
        return jsonify(result)

    except Exception as e:
        return jsonify(
            {
                "success": False,
                "error": str(e),
                "message": "An error occurred while processing the request",
            }
        ), 500
@app.route("/api/ai-process", methods=["GET", "POST"])
def ai_process():
    """Run the AI processor over the user's selected topics.

    Expects a JSON POST body with ``selectedModel``, ``apiKey``,
    ``customPrompt``, ``selectedTopics`` and ``searchTerm``.  The route
    also accepts GET, but a GET carries no payload, so it is rejected
    explicitly instead of falling off the end of the handler and
    returning None (which previously made Flask raise and answer 500).
    """
    try:
        if request.method != "POST":
            return jsonify(
                {
                    "success": False,
                    "message": "Use POST with a JSON body for this endpoint",
                }
            ), 405

        data = request.get_json(silent=True) or {}

        # Extract parameters using the names the frontend sends.
        model = data.get("selectedModel", "gpt-3.5-turbo")
        api_key = data.get("apiKey", "")
        prompt = data.get("customPrompt", "")
        selected_topics = data.get("selectedTopics", [])
        search_term = data.get("searchTerm", "")

        # asyncio.run creates, runs and ALWAYS closes a fresh event loop.
        # The previous new_event_loop()/close() pair leaked the loop
        # whenever process_topics raised, because close() was skipped.
        ai_result = asyncio.run(
            ai_processor.process_topics(
                model=model,
                api_key=api_key,
                prompt=prompt,
                topics=selected_topics,
                search_term=search_term,
            )
        )

        # Return all AI suggestions (with their explanations), not just
        # the intersection with existing topics.
        return jsonify({
            "success": True,
            "result": ai_result
        })

    except Exception as e:
        return jsonify(
            {
                "success": False,
                "error": str(e),
                "message": "An error occurred while processing the request",
            }
        ), 500
@app.route("/api/explain-topic", methods=["POST"])
def explain_topic():
    """Generate a short AI explanation of a topic within a search context.

    Expects JSON with ``topic``, ``searchTerm``, ``originalTopic`` and
    ``apiKey``.  Returns ``{"success": True, "explanation": ...}`` on
    success, HTTP 400 for missing parameters, HTTP 500 for AI failures.
    """
    try:
        # Guard against a missing/invalid JSON body (get_json() -> None).
        data = request.get_json(silent=True) or {}
        # Log the request without leaking the caller's API key.
        print("Received explain-topic request with data:", {k: v for k, v in data.items() if k != 'apiKey'})

        topic = data.get("topic", "")
        search_term = data.get("searchTerm", "")
        original_topic = data.get("originalTopic", "")
        api_key = data.get("apiKey", "")

        if not topic or not search_term or not original_topic:
            print("Missing required parameters:", {
                "topic": bool(topic),
                "search_term": bool(search_term),
                "original_topic": bool(original_topic)
            })
            return jsonify(
                {
                    "success": False,
                    "message": "Missing required parameters: topic, searchTerm, or originalTopic",
                }
            ), 400

        if not api_key:
            print("Missing API key")
            return jsonify(
                {
                    "success": False,
                    "message": "API key is required",
                }
            ), 400

        # Prompt asking the AI for a concise, context-aware explanation.
        prompt = f"""Explain '{topic}' in the context of '{search_term}'.
If it's an abbreviation, what it stands for in '{search_term}'
Keep it concise but informative (1-2 sentences)."""

        try:
            # asyncio.run creates and ALWAYS closes its own event loop, so
            # the loop is no longer leaked when process_topics raises (the
            # old new_event_loop()/close() pair skipped close() on error).
            # Gemini is used for explanations.
            explanation = asyncio.run(
                ai_processor.process_topics(
                    model="gemini-1.5-flash",
                    api_key=api_key,  # Use the API key from the request
                    prompt=prompt,
                    topics=[topic],
                    search_term=search_term,
                )
            )

            if explanation and len(explanation) > 0:
                return jsonify({"success": True, "explanation": explanation[0]})
            else:
                print("No explanation generated")
                return jsonify(
                    {"success": False, "message": "Failed to generate explanation"}
                ), 500

        except Exception as ai_error:
            print("AI processing error:", str(ai_error))
            # Return a more detailed error response
            return jsonify({
                "success": False,
                "error": str(ai_error),
                "message": "Error during AI processing"
            }), 500

    except Exception as e:
        print("Top-level error in explain-topic:", str(e))
        return jsonify(
            {
                "success": False,
                "error": str(e),
                "message": "An error occurred while generating the explanation",
            }
        ), 500
@app.route("/api/suggest-topics", methods=["GET"])
def suggest_topics():
    """Suggest up to 10 topics matching the ``query`` query parameter.

    Exact matches rank first, prefix matches second, substring matches
    last; within a rank, more frequent topics come first.
    """
    try:
        query = request.args.get("query", "").lower().strip()
        if not query:
            # Nothing typed yet -> empty suggestion list, not an error.
            return jsonify({"success": True, "suggestions": []})

        # Rank candidates so exact matches beat prefix matches, which in
        # turn beat plain substring matches; frequency breaks ties.
        sql_query = """
        WITH ranked_topics AS (
            SELECT
                topic,
                COUNT(*) as count,
                CASE
                    WHEN LOWER(topic) = ? THEN 3  -- Exact match gets highest priority
                    WHEN LOWER(topic) LIKE ? THEN 2  -- Starts with query gets second priority
                    ELSE 1  -- Contains query gets lowest priority
                END as match_priority
            FROM repo_topics
            WHERE LOWER(topic) LIKE ?
            GROUP BY topic
        )
        SELECT topic, count
        FROM ranked_topics
        ORDER BY match_priority DESC, count DESC
        LIMIT 10
        """

        # One pattern per placeholder, in order: exact, prefix, substring.
        patterns = [query, f"{query}%", f"%{query}%"]
        rows = topic_service.con.execute(sql_query, patterns).fetchall()

        suggestions = [
            {"name": name.lower(), "count": count} for name, count in rows
        ]
        return jsonify({"success": True, "suggestions": suggestions})

    except Exception as e:
        print(f"Error in suggest-topics: {str(e)}")  # Add logging
        return jsonify({
            "success": False,
            "error": str(e),
            "message": "An error occurred while getting suggestions"
        }), 500
@app.route("/")
def home():
    """Health-check landing page for the API root."""
    return "Hello World!"
if __name__ == "__main__":
    # Development server only: bind to localhost on a fixed port with the
    # Flask debugger/reloader enabled.
    port = 5002
    app.run(host="127.0.0.1", port=port, debug=True)

backend/app/services/__init__.py

Whitespace-only changes.

0 commit comments

Comments
 (0)