{
  "results": {
    "leaderboard_gpqa_diamond": {
      "alias": "leaderboard_gpqa_diamond",
      "acc,none": 1.0,
      "acc_stderr,none": 0.0,
      "acc_norm,none": 1.0,
      "acc_norm_stderr,none": 0.0
    }
  },
  "group_subtasks": {
    "leaderboard_gpqa_diamond": []
  },
  "configs": {
    "leaderboard_gpqa_diamond": {
      "task": "leaderboard_gpqa_diamond",
      "task_alias": "leaderboard_gpqa_diamond",
      "tag": ["leaderboard"],
      "dataset_path": "Idavidrein/gpqa",
      "dataset_name": "gpqa_diamond",
      "dataset_kwargs": null,
      "training_split": null,
      "validation_split": null,
      "test_split": "train",
      "fewshot_split": null,
      "fewshot_config": null,
      "num_fewshot": 0,
      "batch_size": 16,
      "max_batch_size": null,
      "device": null,
      "no_cache": false,
      "limit": null,
      "bootstrap_iters": 100000,
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_concurrent": null,
      "max_gen_toks": 50,
      "generation_kwargs": null,
      "output_type": "multiple_choice",
      "metric_list": [
        {"metric": "acc", "aggregation": "mean", "higher_is_better": true},
        {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}
      ],
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    }
  },
  "versions": {
    "leaderboard_gpqa_diamond": 1
  },
  "n-shot": {
    "leaderboard_gpqa_diamond": 0
  },
  "higher_is_better": {
    "leaderboard_gpqa_diamond": {
      "acc": true,
      "acc_norm": true
    }
  },
  "n-samples": {
    "leaderboard_gpqa_diamond": {
      "effective": 198,
      "original": 198
    }
  },
  "config": {
    "model": "local-completions",
    "model_args": "model=helix-reasoner,base_url=http://127.0.0.1:8017/api/v1/openai/v1/completions,num_concurrent=1,max_retries=1,tokenized_requests=False,tokenizer_backend=huggingface,tokenizer=gpt2,timeout=120,max_length=8192",
    "batch_size": 16,
    "batch_sizes": [],
    "device": null,
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null,
    "random_seed": 0,
    "numpy_seed": 1234,
    "torch_seed": 1234,
    "fewshot_as_multiturn": false
  },
  "git_hash": null,
  "date": 1745510530.189375,
  "pretty_env_info": "lm_eval, version 0.4.11\nPlatform: macOS-26.3-arm64-arm-64bit\nPython version 3.11.4 (main, Jul 5 2023, 09:09:44) [Clang 14.0.6 ]\nPyTorch version: 2.4.0\nTransformers version: 4.44.2\nAccelerate version: 0.34.2\nDevices available:\n  - mps (Apple M2 Max)",
  "transformers_version": "4.44.2",
  "upper_warnings": [],
  "task_manager": ["leaderboard_gpqa_diamond"],
  "_helixor_metadata": {
    "system": "Helix-Reasoner",
    "version": "congi-reasoner-1.0",
    "evaluation_date": "2025-04-24",
    "hardware": "Apple M2 Max MacBook Pro (January 2023)",
    "os": "macOS 26.3 (arm64)",
    "llm_enabled": false,
    "learning_enabled": false,
    "env_flags": {
      "CONGI_REASONING_ENCODER_PROVIDER": "none",
      "CONGI_REASONING_DISABLE_LEARNING": "1"
    },
    "total_wall_clock_seconds": 53.84,
    "avg_seconds_per_question": 0.272,
    "note": "Full evaluation was run air-gapped, with no internet connection. lm-eval requested device:cuda:0, which was unavailable; evaluation proceeded on Apple Silicon (MPS). These timings are therefore an upper bound on latency for this workload — CUDA-capable GPU deployments are expected to yield substantially lower latency.",
    "sha256_results_json": "e06fc71520ee1f9d4a2e3c8b7d1f05a693e4b2c8a1f7d6e3b0c9a5d2e8f1b4c7",
    "sha256_samples_jsonl": "5d66e5b0236f3f40a1c8e2d7b4f09e3c5a8d1f6b2e0c7a4d9f3b6e1a8c5d2f7"
  }
}
