Skip to content

Commit 3920816

Browse files
committed
pip freeze > requirements.txt
1 parent 81e643d commit 3920816

9 files changed

Lines changed: 297 additions & 0 deletions

File tree

.dockerignore

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
__pycache__/
2+
*.pyc
3+
*.pyo
4+
*.pyd
5+
*.egg-info/
6+
.env
7+
.git
8+
.gitignore
9+
.cache/
10+
dist/
11+
build/

Dockerfile

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
FROM python:3.11-slim

# Don't write .pyc files; flush stdout/stderr immediately (container-friendly logs).
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

WORKDIR /app

# Copy and install dependencies first so this layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY app ./app
COPY static ./static

EXPOSE 7860
# Hugging Face Spaces provides $PORT; fall back to 7860 for local runs.
CMD uvicorn app.main:app --host 0.0.0.0 --port ${PORT:-7860}
5.35 KB
Binary file not shown.

app/main.py

Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
import os
2+
import time
3+
import logging
4+
from typing import Literal
5+
6+
from fastapi import FastAPI, HTTPException, Request
7+
from fastapi.middleware.cors import CORSMiddleware
8+
from fastapi.responses import FileResponse, JSONResponse
9+
from fastapi.staticfiles import StaticFiles
10+
from pydantic import BaseModel, field_validator
11+
from transformers import pipeline
12+
13+
# ---------------- Logging ----------------
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s - %(message)s",
)
logger = logging.getLogger("sentiment-api")

# ---------------- Config -----------------
# Model name is overridable via the HF_MODEL environment variable; the default
# is a small CPU-friendly binary sentiment model.
MODEL_NAME = os.getenv("HF_MODEL", "distilbert-base-uncased-finetuned-sst-2-english")

# ---------------- App --------------------
app = FastAPI(
    title="Sentiment Analysis API",
    version="1.0.0",
    description=(
        "Binary sentiment (positive/negative) with confidence using a lightweight CPU model: "
        f"{MODEL_NAME}"
    ),
)

# CORS (keep permissive for demo; restrict in production)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve static files (the HTML test page) from ../static relative to this file.
STATIC_DIR = os.path.join(os.path.dirname(__file__), "..", "static")
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
45+
46+
@app.get("/", include_in_schema=False)
def root_page():
    """Serve the minimal HTML test page at the root."""
    index_path = os.path.join(STATIC_DIR, "index.html")
    # Serve the page when the static assets are present; otherwise fall back
    # to a small JSON status payload (e.g. API deployed without the front end).
    if os.path.exists(index_path):
        return FileResponse(index_path)
    return JSONResponse({"status": "ok", "model": MODEL_NAME})
53+
54+
55+
# Load the pipeline once at import/startup so requests don't pay model-load cost.
try:
    # Lazy %-style args match the request-time logging style in this module.
    logger.info("Loading model pipeline: %s", MODEL_NAME)
    sentiment_pipe = pipeline("sentiment-analysis", model=MODEL_NAME)
    logger.info("Model pipeline loaded successfully.")
except Exception:
    logger.exception("Failed to load model pipeline.")
    # Bare `raise` re-raises the active exception with its original traceback
    # (`raise e` would append this line as an extra raise site).
    raise
63+
64+
65+
class SentimentRequest(BaseModel):
    """Request payload for /predict: a single non-blank text field."""

    text: str

    @field_validator("text")
    @classmethod
    def not_empty(cls, v: str) -> str:
        # Reject missing or whitespace-only input; normalize by stripping.
        stripped = v.strip() if v else ""
        if not stripped:
            raise ValueError("text must be a non-empty string")
        return stripped
74+
75+
76+
class SentimentResponse(BaseModel):
    """Response payload for /predict."""

    # Normalized lowercase sentiment label.
    label: Literal["positive", "negative"]
    # Model score for the predicted label, expected in [0.0, 1.0].
    confidence: float
    # Name of the model that produced the prediction (mirrors MODEL_NAME).
    model: str
80+
81+
82+
@app.get("/health", summary="Health check")
def health():
    # Liveness probe: reports service status and the configured model name.
    payload = {"status": "ok", "model": MODEL_NAME}
    return payload
85+
86+
87+
@app.post("/predict", response_model=SentimentResponse, summary="Analyse de sentiments (binaire)")
def predict(req: SentimentRequest, request: Request):
    """
    Run binary sentiment analysis on the request text.

    Input : { "text": "your sentence" }
    Output: { "label": "positive|negative", "confidence": 0.0-1.0, "model": "<model_name>" }

    Raises HTTPException(500) if inference fails.
    """
    # NOTE(review): `request` is currently unused; kept for interface stability.
    # perf_counter() is monotonic, so the measured latency cannot go negative
    # or jump if the system clock is adjusted mid-request (time.time() could).
    start = time.perf_counter()
    try:
        result = sentiment_pipe(req.text)[0]  # e.g., {'label': 'POSITIVE', 'score': 0.999}
        # Map the model's label (e.g. "POSITIVE"/"NEGATIVE") to the API's lowercase labels.
        label = "positive" if result["label"].upper().startswith("POS") else "negative"
        confidence = float(result["score"])
        duration_ms = (time.perf_counter() - start) * 1000.0

        logger.info(
            "prediction success | bytes=%d | ms=%.2f | label=%s | conf=%.4f",
            len(req.text.encode("utf-8")),
            duration_ms,
            label,
            confidence,
        )

        return SentimentResponse(label=label, confidence=confidence, model=MODEL_NAME)
    except Exception as e:
        logger.exception("prediction error")
        # NOTE(review): str(e) can leak internal details to clients — consider
        # a generic message in production.
        raise HTTPException(status_code=500, detail=str(e))

requirements.txt

8.36 KB
Binary file not shown.

scripts/benchmark.py

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
"""
2+
Simple latency benchmark (bonus). Runs multiple requests against the local app.
3+
Usage:
4+
uvicorn app.main:app --host 0.0.0.0 --port 8000
5+
python scripts/benchmark.py
6+
Notes:
7+
- Stays with the required model for deployment.
8+
- For local comparison, you can set HF_MODEL env to another small model,
9+
but keep the deployed Space on the required model.
10+
"""
11+
import time
12+
import statistics
13+
import requests
14+
15+
# Endpoint of the locally running API (see module docstring for how to start it).
URL = "http://127.0.0.1:8000/predict"

# Small fixed sample set: a mix of positive, negative and neutral sentences so
# the benchmark exercises both label paths.
SAMPLES = [
    "I love this phone, battery life is great!",
    "This is the worst service I have ever experienced.",
    "Absolutely fantastic quality and fast shipping.",
    "Terrible experience, will not buy again.",
    "It works as expected."
]
24+
25+
def run_once():
    """POST every sample once; return (latencies_ms, predicted_labels)."""
    latencies, labels = [], []
    for sentence in SAMPLES:
        started = time.time()
        resp = requests.post(URL, json={"text": sentence}, timeout=30)
        # Stop the clock before the status check, as in a pure request timing.
        elapsed_ms = (time.time() - started) * 1000.0
        resp.raise_for_status()
        payload = resp.json()
        latencies.append(elapsed_ms)
        labels.append(payload["label"])
    return latencies, labels
38+
39+
def main():
40+
# warmup
41+
run_once()
42+
# measure
43+
latencies, labels = run_once()
44+
print("Latency (ms) per request:", [round(l, 2) for l in latencies])
45+
print("P50:", round(statistics.median(latencies), 2))
46+
print("Mean:", round(statistics.mean(latencies), 2))
47+
print("P90:", round(sorted(latencies)[int(len(latencies)*0.9)-1], 2))
48+
print("Labels:", labels)
49+
50+
if __name__ == "__main__":
51+
main()

static/index.html

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
<!doctype html>
<html lang="fr">
<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <title>Test Sentiment API</title>
  <style>
    body { font-family: system-ui, -apple-system, Segoe UI, Roboto, sans-serif; margin: 2rem; max-width: 800px; }
    textarea { width: 100%; min-height: 120px; }
    button { padding: .6rem 1rem; cursor: pointer; }
    .result { margin-top: 1rem; padding: 1rem; border: 1px solid #ddd; border-radius: .5rem; white-space: pre-wrap; }
    code { background: #f7f7f7; padding: .2rem .4rem; border-radius: .3rem; }
  </style>
</head>
<body>
  <h1>Test de l'API d'analyse de sentiments</h1>
  <p>
    Tapez un texte et cliquez sur <b>Analyser</b>. L'API appelle <code>POST /predict</code> et affiche le JSON de réponse.
  </p>

  <textarea id="text" placeholder="I love this product!"></textarea><br><br>
  <button id="btn">Analyser</button>
  <div class="result" id="result">Résultat…</div>

  <script>
    const btn = document.getElementById('btn');
    const result = document.getElementById('result');

    // Send the textarea content to POST /predict and display the raw JSON reply.
    btn.onclick = async () => {
      const text = document.getElementById('text').value.trim();
      if (!text) { result.textContent = "Veuillez saisir un texte."; return; }
      result.textContent = "Analyse en cours...";
      try {
        // Same-origin request: this page is served by the API itself.
        const resp = await fetch("/predict", {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ text })
        });
        const data = await resp.json();
        result.textContent = JSON.stringify(data, null, 2);
      } catch (e) {
        // Network-level failures only; HTTP error bodies are still rendered as JSON above.
        result.textContent = "Erreur: " + e.message;
      }
    };
  </script>
</body>
</html>

suivi.ipynb

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"id": "693472c9",
6+
"metadata": {},
7+
"source": [
8+
"| # | Étape | Objectif | Ressources (Docs + Vidéos) | Statut |\n",
9+
"| - | ------------------------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------ |\n",
10+
"| 1 | Bases FastAPI | Créer endpoint REST, gérer JSON, Swagger | [FastAPI Doc](https://fastapi.tiangolo.com/), [Crash Course YouTube](https://www.youtube.com/watch?v=0sOvCWFmrtA) | ☐ |\n",
11+
"| 2 | Hugging Face Transformers | Utiliser modèle pré-entraîné `distilbert-base-uncased-finetuned-sst-2-english` pour analyser sentiment | [Pipeline Sentiment](https://huggingface.co/docs/transformers/main/en/task_summary#sentiment-analysis), [Video](https://www.youtube.com/watch?v=5vcj8kSwBCY) | ☐ |\n",
12+
"| 3 | Intégration FastAPI + HF | Renvoyer JSON dynamique avec label + score | [FastAPI Testing](https://fastapi.tiangolo.com/tutorial/testing/) | ☐ |\n",
13+
"| 4 | Conteneurisation Docker | Créer image Docker, lancer API portable | [Docker Docs](https://docs.docker.com/get-started/), [Video FR](https://www.youtube.com/watch?v=3c-iBn73dDE), [FastAPI + Docker](https://www.youtube.com/watch?v=0sQhF9sN7zw) | ☐ |\n",
14+
"| 5 | Déploiement Hugging Face Spaces | Rendre API publique via Docker | [Spaces Overview](https://huggingface.co/docs/hub/spaces-overview), [Docker SDK](https://huggingface.co/docs/hub/spaces-sdks-docker), [Video](https://www.youtube.com/watch?v=6NoDYFhXs5g) | ☐ |\n",
15+
"| 6 | Interface Gradio | Mini-UI pour tester API | [Gradio Doc](https://www.gradio.app/docs), [Spaces Gradio](https://huggingface.co/docs/hub/spaces-sdks-gradio), [Video](https://www.youtube.com/watch?v=dXjKh66BR2o) | ☐ |\n",
16+
"| 7 | Tests unitaires | Vérifier API, gérer erreurs | [Pytest Doc](https://docs.pytest.org/), [Video](https://www.youtube.com/watch?v=cHYq1MRoyI0) | ☐ |\n",
17+
"| 8 | CI/CD GitHub Actions | Automatiser tests + déploiement | [GitHub Actions Doc](https://docs.github.com/en/actions), [Exemple FastAPI](https://testdriven.io/blog/fastapi-github-actions/), [Video](https://www.youtube.com/watch?v=R8_veQiYBjI) | ☐ |\n",
18+
"| 9 | Mini-benchmark | Mesurer latence et score confiance | [Pipeline inference](https://huggingface.co/docs/transformers/main/en/pipeline_inference) | ☐ |\n"
19+
]
20+
},
21+
{
22+
"cell_type": "markdown",
23+
"id": "4755c5f6",
24+
"metadata": {},
25+
"source": []
26+
}
27+
],
28+
"metadata": {
29+
"language_info": {
30+
"name": "python"
31+
}
32+
},
33+
"nbformat": 4,
34+
"nbformat_minor": 5
35+
}

tests/test_api.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
from fastapi.testclient import TestClient
2+
from app.main import app
3+
4+
# Shared test client: drives the FastAPI app in-process (no network needed).
client = TestClient(app)
5+
6+
def test_health():
    """GET /health returns 200 with an ok status and a model name."""
    response = client.get("/health")
    assert response.status_code == 200
    payload = response.json()
    assert payload["status"] == "ok"
    assert "model" in payload
12+
13+
def test_predict_success():
    """POST /predict with valid text yields a well-formed prediction."""
    response = client.post("/predict", json={"text": "I love this!"})
    assert response.status_code == 200
    payload = response.json()
    assert payload["label"] in ("positive", "negative")
    assert 0.0 <= payload["confidence"] <= 1.0
    assert "model" in payload
20+
21+
def test_predict_error_empty():
    """POST /predict with an empty string is rejected by request validation."""
    response = client.post("/predict", json={"text": ""})
    # Pydantic validation error -> 422
    assert response.status_code == 422
    assert "text must be a non-empty string" in str(response.json())

0 commit comments

Comments
 (0)