# backup/reform_dataset.py
import os
import json
import time
import random
import hashlib  # used for a process-stable shuffle seed (see _process_one)
from typing import Dict, Any, List, Optional, Tuple

from openai import OpenAI
import multiprocessing as mp

# -------------------------
# IO helpers
# -------------------------
def load_json(file_path: str):
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)


def append_jsonl(path: str, obj: Dict[str, Any]):
    # os.makedirs("") raises FileNotFoundError, so only create a directory
    # when the path actually has one.
    dir_name = os.path.dirname(path)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(obj, ensure_ascii=False) + "\n")


def load_done_keys(jsonl_path: str):
    """Collect keys already written to the output JSONL so reruns can resume."""
    done = set()
    if not os.path.exists(jsonl_path):
        return done
    with open(jsonl_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
                k = obj.get("key")
                if k is not None:
                    done.add(k)
            except Exception:
                continue
    return done
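
# Illustrative resume flow (hypothetical values, not from a real run): a line
# in the output JSONL such as
#   {"key": "scene0011_00::42", "scan_id": "scene0011_00", ...}
# makes load_done_keys() return a set containing "scene0011_00::42", and
# main() then skips that item when rebuilding the task list.
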
# -------------------------
# LLM helpers
# -------------------------
def safe_json_parse(text: str) -> Optional[Dict[str, Any]]:
    # Best-effort parse: strip Markdown code fences (```json ... ```) that the
    # model may wrap around its output, then attempt json.loads.
    text = (text or "").strip()
    if text.startswith("```"):
        text = text.strip("`").strip()
        if text.lower().startswith("json"):
            text = text[4:].strip()
    try:
        return json.loads(text)
    except Exception:
        return None
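
# Illustrative inputs and results (not executed):
#   safe_json_parse('{"statement": "ok"}')               -> {"statement": "ok"}
#   safe_json_parse('```json\n{"statement": "ok"}\n```') -> {"statement": "ok"}
#   safe_json_parse("not json at all")                   -> None
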
def call_chat_json(
    client: OpenAI,
    model: str,
    system: str,
    user: str,
    max_tokens: int,
    temperature: float,
    top_p: float = 0.9,
    max_retries: int = 4,
) -> Dict[str, Any]:
    # Retry with linear backoff until the model returns parseable JSON.
    last_err = None
    for attempt in range(1, max_retries + 1):
        try:
            resp = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user},
                ],
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
            )
            text = resp.choices[0].message.content
            obj = safe_json_parse(text)
            if obj is None:
                raise ValueError(f"JSON parse failed. Raw: {text[:200]}...")
            return obj
        except Exception as e:
            last_err = str(e)
            time.sleep(1.0 * attempt)
    raise RuntimeError(f"call_chat_json failed after {max_retries} retries. Last error: {last_err}")
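
# Illustrative call (hypothetical prompt values):
#   obj = call_chat_json(client, "Qwen/Qwen3-VL-8B-Instruct",
#                        system="Reply with JSON only.",
#                        user='Return {"ok": true}.',
#                        max_tokens=32, temperature=0.0)
#   # obj is the parsed dict once a response parses as JSON, e.g. {"ok": True}
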
def normalize_text(s: str) -> str:
    return " ".join((s or "").lower().split())
# -------------------------
# Binary question detection
# -------------------------
_BINARY_ANS = {"yes", "no", "true", "false"}
_AUX_START = {
    "is", "are", "was", "were",
    "do", "does", "did",
    "can", "could",
    "will", "would", "should",
    "has", "have", "had",
    "may", "might", "must",
}


def is_binary_qa(question: str, gt_answer: str) -> bool:
    # Binary if the ground-truth answer is yes/no/true/false, or if the
    # question ends with "?" and starts with an auxiliary verb.
    a = normalize_text(gt_answer)
    if a in _BINARY_ANS:
        return True
    q = (question or "").strip().lower()
    if q.endswith("?"):
        first = q.split()[0] if q.split() else ""
        if first in _AUX_START:
            return True
    return False
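
# Illustrative classifications (not executed):
#   is_binary_qa("Is the door open?", "yes")                   -> True  (binary answer)
#   is_binary_qa("Does the desk face the bed?", "it faces it") -> True  (auxiliary-verb question)
#   is_binary_qa("What color is the chair?", "brown")          -> False
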
# -------------------------
# Round 1: Q/A -> correct statement
# -------------------------
SYSTEM_STATEMENT = """Convert a VQA (question + ground-truth answer) into ONE declarative sentence.
Return STRICT JSON: {"statement": "..."} only.
Rules:
- Must be accurate given the answer.
- Natural, concise, factual.
- Preserve referenced entity labels if any (e.g., "chair A").
- JSON only.
"""
USER_STATEMENT = """Question: {question}
Ground-truth answer: {answer}
Return JSON only.
"""
def gen_statement(client: OpenAI, model: str, question: str, answer: str) -> str:
    obj = call_chat_json(
        client=client,
        model=model,
        system=SYSTEM_STATEMENT,
        user=USER_STATEMENT.format(question=question, answer=answer),
        max_tokens=128,
        temperature=0.3,
    )
    st = obj.get("statement", "")
    if not isinstance(st, str) or not st.strip():
        raise ValueError(f"Bad statement: {obj}")
    return st.strip()
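
# Illustrative Round-1 conversion (hypothetical values):
#   question = "What color is the chair next to the desk?"
#   answer   = "brown"
#   model output -> {"statement": "The chair next to the desk is brown."}
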
# -------------------------
# Round 2: correct statement -> wrong options (N=9 default, N=1 for binary)
# -------------------------
# NOTE: the SYSTEM_* prompts below are sent verbatim (never str.format()-ed),
# so their JSON schemas use literal single braces.
SYSTEM_WRONG_9 = """
You generate distractors for a ground-truth statement.
OUTPUT MUST BE STRICT JSON ONLY:
{"distractors": ["<s1>", "<s2>", "<s3>", "<s4>", "<s5>", "<s6>", "<s7>", "<s8>", "<s9>"]}
Rules:
- Exactly 9 strings, all unique.
- Each is a single declarative sentence.
- Each must be WRONG given the ground-truth answer.
- Preserve the same main subject/entity (same instance label if present).
- The distractors should be diverse, unambiguous, and realistic; the difference between the ground truth and each distractor must be VERY clear.
- No markdown, no extra keys.
"""
USER_WRONG_9 = """INPUT
Question: {question}
Ground-truth answer: {answer}
Ground-truth statement: {statement}
TASK
Generate 9 WRONG but plausible alternative declarative statements.
REMINDER
Output JSON only with the exact schema:
{{"distractors": ["...","...","...","...","...","...","...","...","..."]}}
"""
SYSTEM_WRONG_1 = """
You generate distractors for a ground-truth statement.
OUTPUT MUST BE STRICT JSON ONLY:
{"distractors": ["<s1>"]}
Rules:
- Exactly 1 string.
- Must be a single declarative sentence.
- Must be WRONG given the ground-truth answer.
- Preserve the same main subject/entity (same instance label if present).
- Make the wrong answer VERY clear (not ambiguous).
- No markdown, no extra keys.
"""
USER_WRONG_1 = """INPUT
Question: {question}
Ground-truth answer: {answer}
Ground-truth statement: {statement}
TASK
Generate 1 WRONG but plausible alternative declarative statement.
REMINDER
Output JSON only with the exact schema:
{{"distractors": ["..."]}}
"""
def gen_wrong_sentences(
    client: OpenAI,
    model: str,
    question: str,
    answer: str,
    statement: str,
    n: int = 9,
    max_rounds: int = 5,
) -> List[str]:
    target = n
    collected: List[str] = []
    seen = set([normalize_text(statement)])  # disallow matching correct
    if n == 1:
        system = SYSTEM_WRONG_1
        user_tmpl = USER_WRONG_1
        max_tokens = 256
        temperature = 0.7
    else:
        system = SYSTEM_WRONG_9
        user_tmpl = USER_WRONG_9
        max_tokens = 512
        temperature = 0.85
    for _round in range(max_rounds):
        obj = call_chat_json(
            client=client,
            model=model,
            system=system,
            user=user_tmpl.format(question=question, answer=answer, statement=statement),
            max_tokens=max_tokens,
            temperature=temperature,
        )
        ds = obj.get("distractors", None)
        if not isinstance(ds, list):
            continue
        for x in ds:
            if not isinstance(x, str):
                continue
            x = x.strip()
            if not x:
                continue
            nx = normalize_text(x)
            if nx in seen:
                continue
            seen.add(nx)
            collected.append(x)
        if len(collected) >= target:
            return collected[:target]
    raise ValueError(f"Could not collect {target} unique distractors; got {len(collected)}")
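
# Illustrative accumulation (hypothetical): with n=9, round 1 might yield 9
# candidates of which 7 survive deduplication; round 2 then only needs to add
# 2 more unique sentences before the function returns collected[:9].
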
# -------------------------
# Build final options
# -------------------------
def build_mcq(correct: str, wrongs: List[str], seed: Optional[int] = None):
    options = [correct] + list(wrongs)
    if seed is not None:
        rnd = random.Random(seed)
        rnd.shuffle(options)
    else:
        random.shuffle(options)
    label = options.index(correct)
    return options, label
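
# Illustrative call (hypothetical values): a fixed seed makes the shuffle
# reproducible, and `label` is the post-shuffle index of the correct option.
#   options, label = build_mcq("The chair is brown.", wrongs, seed=12345)
#   assert options[label] == "The chair is brown."
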
# -------------------------
# Multiprocessing worker
# -------------------------
_WORKER_CLIENT = None
_WORKER_MODEL = None


def _init_worker(base_url: str, model_name: str, timeout: int = 3600):
    """Initializer runs once per process."""
    global _WORKER_CLIENT, _WORKER_MODEL
    _WORKER_MODEL = model_name
    _WORKER_CLIENT = OpenAI(api_key="EMPTY", base_url=base_url, timeout=timeout)
def _process_one(args: Tuple[str, int, str, str, str]) -> Dict[str, Any]:
    """
    Args:
        key, idx, scan_id, question, gt_answer
    Returns:
        {"ok": True, "out": {...}} or {"ok": False, "err": {...}}
    """
    key, idx, scan_id, question, gt_answer = args
    try:
        correct_stmt = gen_statement(_WORKER_CLIENT, _WORKER_MODEL, question, gt_answer)
        n_wrong = 1 if is_binary_qa(question, gt_answer) else 9
        wrongs = gen_wrong_sentences(
            _WORKER_CLIENT,
            _WORKER_MODEL,
            question,
            gt_answer,
            correct_stmt,
            n=n_wrong,
            max_rounds=6 if n_wrong == 9 else 4,
        )
        # Derive the shuffle seed from a digest of the key: built-in hash() is
        # salted per process, so it is not reproducible across workers/runs.
        seed = int.from_bytes(hashlib.md5(key.encode("utf-8")).digest()[:4], "big")
        options, label = build_mcq(correct_stmt, wrongs, seed=seed)
        out = {
            "key": key,
            "scan_id": scan_id,
            "question": question,
            "gt_answer": gt_answer,
            "correct": correct_stmt,
            "options": options,  # length 2 for binary, length 10 otherwise
            "label": label,
            "is_binary": (n_wrong == 1),
        }
        return {"ok": True, "out": out}
    except Exception as e:
        return {"ok": False, "err": {
            "key": key,
            "scan_id": scan_id,
            "question": question,
            "gt_answer": gt_answer,
            "error": str(e),
        }}
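
# Illustrative success record (hypothetical values, appended to out_jsonl):
#   {"key": "scene0011_00::42", "scan_id": "scene0011_00",
#    "question": "Is the door open?", "gt_answer": "yes",
#    "correct": "The door is open.",
#    "options": ["The door is closed.", "The door is open."],
#    "label": 1, "is_binary": true}
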
# -------------------------
# Main
# -------------------------
def main():
    base_url = "http://lrc-alpha-sg-gpu06:22001/v1"
    model_name = "Qwen/Qwen3-VL-8B-Instruct"
    in_path = "/home/m50048399/transfered/ye_project/PointMapVerse/existing_datasets/ScanNet/annotations/qa/ScanQA_v1.0_val.json"
    out_jsonl = "/home/m50048399/transfered/ye_project/PointMapVerse/derived/scanqa_sentence_mcq_val.jsonl"
    err_jsonl = out_jsonl + ".errors.jsonl"

    data = load_json(in_path)
    done = load_done_keys(out_jsonl)
    print(f"Loaded {len(data)} items. Already done: {len(done)}")

    # Build task list
    tasks = []
    for idx, item in enumerate(data):
        scan_id = item.get("scene_id", "")  # keep your current field
        question = (item.get("question") or "").strip()
        answers = item.get("answers") or []
        gt_answer = (answers[0] if answers else "").strip()
        if not question or not gt_answer:
            continue
        key = f"{scan_id}::{idx}"
        if key in done:
            continue
        tasks.append((key, idx, scan_id, question, gt_answer))
    print(f"To process: {len(tasks)}")

    # Multiprocessing: 8 workers
    ctx = mp.get_context("spawn")
    with ctx.Pool(
        processes=8,
        initializer=_init_worker,
        initargs=(base_url, model_name, 3600),
        maxtasksperchild=50,
    ) as pool:
        processed = 0
        ok_cnt = 0
        err_cnt = 0
        for res in pool.imap_unordered(_process_one, tasks, chunksize=4):
            processed += 1
            if res["ok"]:
                append_jsonl(out_jsonl, res["out"])
                ok_cnt += 1
            else:
                append_jsonl(err_jsonl, res["err"])
                err_cnt += 1
            if processed % 100 == 0:
                print(f"Finished {processed}/{len(tasks)} | ok={ok_cnt} err={err_cnt}")
    print("Done.")


if __name__ == "__main__":
    main()
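
# Illustrative ScanQA input item (hypothetical values; main() consumes only
# scene_id, question, and answers[0]):
#   {"scene_id": "scene0011_00",
#    "question": "What color is the chair next to the desk?",
#    "answers": ["brown"]}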