Skip to content

Commit 87b6c92

Browse files
committed
update readme
1 parent f54121f commit 87b6c92

6 files changed

Lines changed: 24 additions & 6 deletions

File tree

README.md

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -26,6 +26,7 @@
2626
- **Versatile Applications**: Ready to use as a best-in-class reranker to improve editing outputs, or as a high-fidelity reward signal for **stable and effective Reinforcement Learning (RL) fine-tuning**.
2727

2828
## 🔥 News
29+
- **2025-09-30**: LoRA weights for OmniGen2, trained via Reinforcement Learning with EditScore-7B (Avg8) as the reward model, have been released on [Hugging Face]() and [ModelScope]().
2930
- **2025-09-30**: We are excited to release **EditScore** and **EditReward-Bench**! Model weights and the benchmark dataset are now publicly available. You can access them on Hugging Face: [Models Collection](https://huggingface.co/collections/EditScore/editscore-68d8e27ee676981221db3cfe) and [Benchmark Dataset](https://huggingface.co/datasets/EditScore/EditReward-Bench), and on ModelScope: [Models Collection](https://www.modelscope.cn/collections/EditScore-8b0d53aa945d4e) and [Benchmark Dataset](https://www.modelscope.cn/datasets/EditScore/EditReward-Bench).
3031

3132
## 📖 Introduction

editscore/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -129,6 +129,8 @@ def evaluate(self, image_prompts, text_prompt):
129129
"consistency": np.mean([output_per_pass["consistency"] for output_per_pass in outputs_multi_pass]),
130130
"perceptual_quality": np.mean([output_per_pass["perceptual_quality"] for output_per_pass in outputs_multi_pass]),
131131
"overall": np.mean([output_per_pass["overall"] for output_per_pass in outputs_multi_pass]),
132+
"SC_reasoning": SC_dict["reasoning"],
133+
"PQ_reasoning": PQ_dict["reasoning"],
132134
}
133135
if self.reduction == "average_first":
134136
output["overall"] = math.sqrt(output["prompt_following"] * output["perceptual_quality"])

editscore/mllm_tools/qwen25vl.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -43,7 +43,7 @@ def __init__(
4343
self.lora_path = lora_path
4444

4545
self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
46-
vlm_model, dtype=torch.bfloat16, device_map="auto"
46+
vlm_model, torch_dtype=torch.bfloat16, device_map="auto"
4747
)
4848
if enable_lora:
4949
self.model = PeftModel.from_pretrained(self.model, lora_path)

evaluate.sh

Lines changed: 3 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -2,6 +2,9 @@
22
SHELL_FOLDER=$(cd "$(dirname "$0")";pwd)
33
cd $SHELL_FOLDER
44

5+
source "$(dirname $(which conda))/../etc/profile.d/conda.sh"
6+
conda activate py3.12+pytorch2.7.1+cu126
7+
58
python evaluation.py \
69
--benchmark_dir EditScore/EditReward-Bench \
710
--result_dir results/EditScore-7B \

evaluate_vllm.sh

Lines changed: 3 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -2,6 +2,9 @@
22
SHELL_FOLDER=$(cd "$(dirname "$0")";pwd)
33
cd $SHELL_FOLDER
44

5+
source "$(dirname $(which conda))/../etc/profile.d/conda.sh"
6+
conda activate py3.12+pytorch2.7.1+cu126
7+
58
python evaluation.py \
69
--benchmark_dir EditScore/EditReward-Bench \
710
--result_dir results/EditScore-7B \

evaluation.py

Lines changed: 14 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -228,7 +228,7 @@ def main(args):
228228
print("Writing results...", flush=True)
229229

230230
start_time = time.time()
231-
dataset = dataset.remove_columns(["input_image", "output_images"])
231+
# dataset = dataset.remove_columns(["input_image", "output_images"])
232232
for idx, data in enumerate(dataset):
233233
key1, key2 = data["key"]
234234
task_type = data["task_type"]
@@ -238,13 +238,22 @@ def main(args):
238238
score2 = all_scores[key2][dimension]
239239
data["score"] = [score1, score2]
240240

241+
input_image_path = os.path.join(args.result_dir, "input_images", f"{key1}_input.png")
242+
output_image_path1 = os.path.join(args.result_dir, "output_images", f"{key1}_output0.png")
243+
output_image_path2 = os.path.join(args.result_dir, "output_images", f"{key2}_output1.png")
244+
245+
data['input_image'].save(input_image_path)
246+
data['output_images'][0].save(output_image_path1)
247+
data['output_images'][1].save(output_image_path2)
248+
241249
json_line = {
242250
"key": (key1, key2),
243251
"idx": idx,
244-
"score": [
245-
score1,
246-
score2,
247-
],
252+
"score": [score1, score2],
253+
"SC_reasoning": [all_scores[key1]["SC_reasoning"], all_scores[key2]["SC_reasoning"]],
254+
"PQ_reasoning": [all_scores[key1]["PQ_reasoning"], all_scores[key2]["PQ_reasoning"]],
255+
"input_image": input_image_path,
256+
"output_images": [output_image_path1, output_image_path2],
248257
}
249258

250259
save_file = os.path.join(

0 commit comments

Comments (0)