-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathregenerateAndTest.sh
More file actions
executable file
·70 lines (53 loc) · 2.46 KB
/
regenerateAndTest.sh
File metadata and controls
executable file
·70 lines (53 loc) · 2.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
#!/bin/bash
# Regenerate parsed/derived outputs for openworm_ai and run the smoke-test
# pipelines.
#
# Usage: ./regenerateAndTest.sh [MODE] [extra args forwarded to GraphRAG_test]
#   MODE: -llamaparse | -quiz | -qplot | -llm | -free | -reparse-all | (none)
set -ex

# Format + lint every Python file before doing any real work.
ruff format openworm_ai/*.py openworm_ai/*/*.py openworm_ai/*/*/*.py
ruff check openworm_ai/*.py openworm_ai/*/*.py openworm_ai/*/*/*.py

# Install the package with dev extras; quoted so '.[dev]' is never
# treated as a glob pattern by the shell.
pip install ".[dev]"

# NOTE: "${1:-}" is quoted/defaulted so the script works when run with no
# arguments — unquoted $1 made '[ == "-llamaparse" ]' a syntax error, which
# aborted the script under 'set -e' before the default branch could run.
if [ "${1:-}" == "-llamaparse" ]; then
    python -m openworm_ai.parser.llamaparse_backend
elif [ "${1:-}" == "-quiz" ]; then
    python -m openworm_ai.quiz.QuizMaster 10
    python -m openworm_ai.quiz.QuizMaster -ask
    python -m openworm_ai.quiz.QuizMaster -ask -o-t
elif [ "${1:-}" == "-qplot" ]; then
    python -m openworm_ai.quiz.figures.quizplots_overcategories -nogui
    python -m openworm_ai.quiz.figures.quizplot_grid -nogui
    python -m openworm_ai.quiz.figures.quizplots -nogui
elif [ "${1:-}" == "-llm" ]; then
    python -m openworm_ai.utils.llms # default - ChatGPT via API
    python -m openworm_ai.utils.llms -hf-qwen # Qwen via HuggingFace API
    python -m openworm_ai.utils.llms -co # Cohere via API - free
    python -m openworm_ai.utils.llms -g25 # gemini-2.5-flash via API - free tier
    python -m openworm_ai.utils.llms -o-l323b # Ollama:llama3.2:3b
    python -m openworm_ai.utils.llms -ge2 # Ollama:gemini2:latest
    python -m openworm_ai.utils.llms -o-qw # Ollama:qwen3:1.7b
    python -m openworm_ai.utils.llms -hf-mistral #HF: mistral 7b
    python -m openworm_ai.utils.llms -hf-llama32 #HF: Llama 32 3b
    python -m openworm_ai.utils.llms -hf-qwen #HF: qwen.2.5 7b
    python -m openworm_ai.utils.llms -hf-gemma2 #HF: gemma 2 9b
    #ADD HUGGING FACE OPTIONS
else
    # Default pipeline: rebuild document models, quiz model, plots and parsers.
    python -m openworm_ai.parser.DocumentModels
    python -m openworm_ai.quiz.QuizModel
    python -m openworm_ai.quiz.figures.quizplot_grid -nogui
    python -m openworm_ai.parser.ParseWormAtlas
    #Do not call LlamaParse; use existing parsed outputs
    if [ "${1:-}" == "-free" ]; then
        python -m openworm_ai.parser.ParseLlamaIndexJson --skip
        python -m openworm_ai.graphrag.GraphRAG_test -test
        python -m corpus.papers.enrich_source_registry
    #Force full rebuild of raw/processed outputs
    elif [ "${1:-}" == "-reparse-all" ]; then
        python -m openworm_ai.parser.ParseLlamaIndexJson --reparse-all
        # "$@" (quoted) forwards every caller argument verbatim, even if one
        # contains whitespace; unquoted $@ would re-split them.
        python -m openworm_ai.graphrag.GraphRAG_test "$@"
        python -m corpus.papers.enrich_source_registry
    #Default: incremental parse + monthly refresh (30 days)
    else
        python -m openworm_ai.parser.ParseLlamaIndexJson --skip
        python -m openworm_ai.graphrag.GraphRAG_test "$@"
        python -m corpus.papers.enrich_source_registry
    fi
fi
echo
echo "  Success!"
echo