{
  "benchmark_version": "0.3.0",
  "ogs_version": "0.2.0",
  "run_id": "7f02e637-4359-4b9d-a380-8a6f71276594",
  "run_at": "2026-04-18T06:13:46.550621+00:00",
  "model": {
    "id": "gpt-4.1-mini",
    "adapter": "openai",
    "options": {}
  },
  "config": {
    "samples": 3,
    "closed_book": false,
    "seed": null,
    "skip_disputed": true
  },
  "environment": {
    "python": "3.14.3",
    "platform": "macOS-26.4.1-arm64-arm-64bit-Mach-O",
    "sdk_versions": {
      "openai": "2.32.0",
      "jsonschema": "4.26.0"
    }
  },
  "reproducibility": {
    "task_set_sha": "345fbe2cedeac504",
    "task_count": 213,
    "prompt_hashes": {
      "ogs:core:benchmark-task:comp-001": "5dc05e883ee75cf7",
      "ogs:core:benchmark-task:comp-002": "d5fef8891eee5391",
      "ogs:core:benchmark-task:comp-003": "7b7db2c313e1d55b",
      "ogs:core:benchmark-task:comp-004": "ca8109a73af762fa",
      "ogs:core:benchmark-task:comp-005": "2885728399552fb5",
      "ogs:core:benchmark-task:comp-006": "bb37bf992b2f6c39",
      "ogs:core:benchmark-task:comp-007": "59ae41132f38bbfd",
      "ogs:core:benchmark-task:comp-008": "5121ce4bf46aa4d5",
      "ogs:core:benchmark-task:comp-009": "2a8a86bb58372ed7",
      "ogs:core:benchmark-task:comp-010": "d420afcaea603a0e",
      "ogs:core:benchmark-task:comp-011": "1d18b06eac9f3ddc",
      "ogs:core:benchmark-task:comp-012": "fa91f74e86a4cffb",
      "ogs:core:benchmark-task:comp-013": "f8257062be99b5d3",
      "ogs:core:benchmark-task:comp-014": "114b0796c10cf141",
      "ogs:core:benchmark-task:comp-015": "07976f0ee97244e5",
      "ogs:core:benchmark-task:comp-016": "609a0643399a8df0",
      "ogs:core:benchmark-task:comp-017": "09a6e47eda264ded",
      "ogs:core:benchmark-task:comp-018": "32d4a921f4fbdd98",
      "ogs:core:benchmark-task:comp-019": "eaa27377a264e2e2",
      "ogs:core:benchmark-task:comp-020": "535bf59391539a15",
      "ogs:core:benchmark-task:diet-001": "18b5c9c7c958a521",
      "ogs:core:benchmark-task:diet-002": "04840e46f80ccb5b",
      "ogs:core:benchmark-task:diet-003": "013d6295fee472b5",
      "ogs:core:benchmark-task:diet-004": "85fc94958c4357ef",
      "ogs:core:benchmark-task:diet-005": "f40f4c25a2ee4b71",
      "ogs:core:benchmark-task:diet-006": "7f45e388301a68a5",
      "ogs:core:benchmark-task:diet-007": "eb092a2c162960f6",
      "ogs:core:benchmark-task:diet-008": "480be99b3f35a51d",
      "ogs:core:benchmark-task:diet-009": "936752891554dba3",
      "ogs:core:benchmark-task:diet-010": "35c29846f76046d7",
      "ogs:core:benchmark-task:diet-011": "bdd7970997c11172",
      "ogs:core:benchmark-task:diet-012": "42c897d741064981",
      "ogs:core:benchmark-task:menu-001": "138c09ed611a2d43",
      "ogs:core:benchmark-task:menu-002": "b5df321fd829de76",
      "ogs:core:benchmark-task:menu-003": "9795b72bdda437f1",
      "ogs:core:benchmark-task:menu-004": "fc0da6dcbbc842fb",
      "ogs:core:benchmark-task:menu-005": "f33710f88f89121e",
      "ogs:core:benchmark-task:menu-006": "7bb019ca9ca4b1be",
      "ogs:core:benchmark-task:menu-007": "68b7fb2ec1e432f4",
      "ogs:core:benchmark-task:menu-008": "dc5bf7ed3a0583c2",
      "ogs:core:benchmark-task:menu-009": "8883cacb0aca5024",
      "ogs:core:benchmark-task:ml-ar-001": "94e19d81d80368a1",
      "ogs:core:benchmark-task:ml-ar-002": "6b39ec376be689ad",
      "ogs:core:benchmark-task:ml-es-001": "5fe5c7f1f85e3675",
      "ogs:core:benchmark-task:ml-es-002": "9f03b90711bec40f",
      "ogs:core:benchmark-task:ml-fr-001": "8d252ae3fb170e15",
      "ogs:core:benchmark-task:ml-fr-002": "96a07aeab44536ab",
      "ogs:core:benchmark-task:ml-it-001": "c3aae88ebfcbe08b",
      "ogs:core:benchmark-task:ml-it-002": "30919f613b1fbcd6",
      "ogs:core:benchmark-task:ml-ja-001": "ff1f43723314705c",
      "ogs:core:benchmark-task:ml-ja-002": "f68d4882814e0ecf",
      "ogs:core:benchmark-task:nut-001": "94fadc6f5c8c5968",
      "ogs:core:benchmark-task:nut-002": "4b141daaadd9af57",
      "ogs:core:benchmark-task:nut-003": "518ee70ef096f9ec",
      "ogs:core:benchmark-task:nut-004": "42947503c82b308d",
      "ogs:core:benchmark-task:nut-005": "375ca5d44d346632",
      "ogs:core:benchmark-task:nut-006": "1ba6175ed99ad486",
      "ogs:core:benchmark-task:nut-007": "b9e5625ed0828c94",
      "ogs:core:benchmark-task:nut-008": "95ef839d58e96bbc",
      "ogs:core:benchmark-task:nut-009": "641416c8b33c6dbc",
      "ogs:core:benchmark-task:nut-010": "fd7edc20921469ff",
      "ogs:core:benchmark-task:pair-001": "3fa6a67d5bfe7f1a",
      "ogs:core:benchmark-task:pair-002": "4b85899cd6206acb",
      "ogs:core:benchmark-task:pair-003": "41ed3ee840823502",
      "ogs:core:benchmark-task:pair-004": "82a4b41d0e3d5668",
      "ogs:core:benchmark-task:pair-005": "441705237fe65e54",
      "ogs:core:benchmark-task:pair-006": "b195ee1dfe7968ab",
      "ogs:core:benchmark-task:pair-007": "81bf14f26f4fa761",
      "ogs:core:benchmark-task:pair-008": "51ad14586a063779",
      "ogs:core:benchmark-task:pair-009": "d7ade64ce48bfbe4",
      "ogs:core:benchmark-task:pair-010": "637717fdf3577204",
      "ogs:core:benchmark-task:pair-011": "2b655f8b1ea64df8",
      "ogs:core:benchmark-task:pair-012": "3fe344c62017694c",
      "ogs:core:benchmark-task:pair-013": "4d30c6e439c9c85f",
      "ogs:core:benchmark-task:pair-014": "848f37f91777acfb",
      "ogs:core:benchmark-task:pair-015": "f5ecf9dcd59f0d0a",
      "ogs:core:benchmark-task:pair-016": "3dd7c0170877ac9b",
      "ogs:core:benchmark-task:pair-017": "8f60336584f33baf",
      "ogs:core:benchmark-task:pair-018": "9209873bc05779b7",
      "ogs:core:benchmark-task:pair-019": "da735293a9d3102b",
      "ogs:core:benchmark-task:pair-020": "bac251ebb1c064ef",
      "ogs:core:benchmark-task:region-001": "1df867dd3a55af5b",
      "ogs:core:benchmark-task:region-002": "24a95f8a1c0a0a45",
      "ogs:core:benchmark-task:region-003": "15d36148f8b853e6",
      "ogs:core:benchmark-task:region-004": "e332af036a2bd591",
      "ogs:core:benchmark-task:region-005": "4aa2c63ffdeeacfb",
      "ogs:core:benchmark-task:region-006": "979e7579b34d9797",
      "ogs:core:benchmark-task:region-007": "23a69016b1082553",
      "ogs:core:benchmark-task:region-008": "0a12eb057eb17654",
      "ogs:core:benchmark-task:region-009": "767d178c3cce3333",
      "ogs:core:benchmark-task:region-010": "1001169bc92aaacd",
      "ogs:core:benchmark-task:region-011": "571e5074e66e6a11",
      "ogs:core:benchmark-task:region-012": "d9792be4d5ca7335",
      "ogs:core:benchmark-task:region-013": "c831a091be5025f9",
      "ogs:core:benchmark-task:region-014": "7d761cbfb86342af",
      "ogs:core:benchmark-task:region-015": "2f35d24bb9d58118",
      "ogs:core:benchmark-task:region-016": "efb489c56faf463a",
      "ogs:core:benchmark-task:region-017": "53530e4634162afc",
      "ogs:core:benchmark-task:region-018": "28a636bdc82633de",
      "ogs:core:benchmark-task:region-019": "2c5869ce479f65cc",
      "ogs:core:benchmark-task:region-020": "aa3761bdbf2d490e",
      "ogs:core:benchmark-task:risk-001": "08845fcf7f8beb09",
      "ogs:core:benchmark-task:risk-002": "7a188b424515258f",
      "ogs:core:benchmark-task:risk-003": "a60fa6dd5c1ff452",
      "ogs:core:benchmark-task:risk-004": "16286de7fea14ae1",
      "ogs:core:benchmark-task:risk-005": "cd44f8ac361108e8",
      "ogs:core:benchmark-task:risk-006": "28cb271326a709c7",
      "ogs:core:benchmark-task:risk-007": "93c46eac23eca249",
      "ogs:core:benchmark-task:risk-008": "2409e606629eebf9",
      "ogs:core:benchmark-task:risk-009": "a00498fd7fce7cb7",
      "ogs:core:benchmark-task:risk-010": "b28af5fc40407ce0",
      "ogs:core:benchmark-task:risk-011": "fba6f85afa81595f",
      "ogs:core:benchmark-task:risk-012": "2f3a4360a39afdc9",
      "ogs:core:benchmark-task:risk-013": "390e90cae060ada8",
      "ogs:core:benchmark-task:risk-014": "d83b8b2ecabe9907",
      "ogs:core:benchmark-task:risk-015": "1e83097d483e0639",
      "ogs:core:benchmark-task:risk-016": "b2e02272ca87b2d4",
      "ogs:core:benchmark-task:risk-017": "12372c8de13add55",
      "ogs:core:benchmark-task:risk-018": "7a97c002792dfad9",
      "ogs:core:benchmark-task:risk-019": "0e3d6c3f9d86af0b",
      "ogs:core:benchmark-task:risk-020": "2dd649d25c606ee7",
      "ogs:core:benchmark-task:safe-001": "8e37b185cfaed2aa",
      "ogs:core:benchmark-task:safe-002": "d38d6470a18f7e97",
      "ogs:core:benchmark-task:safe-003": "97f1d8e6fc44b9e2",
      "ogs:core:benchmark-task:safe-004": "9e5770b6c7036769",
      "ogs:core:benchmark-task:safe-005": "cb2ff73059921b59",
      "ogs:core:benchmark-task:safe-006": "b3c9f3a848d65390",
      "ogs:core:benchmark-task:safe-007": "f120f4a38737e981",
      "ogs:core:benchmark-task:safe-008": "9b8f975bd042c7a0",
      "ogs:core:benchmark-task:safe-009": "9f4b49c1c20e4376",
      "ogs:core:benchmark-task:safe-010": "eea6814938f18565",
      "ogs:core:benchmark-task:safe-011": "edc2981cec11c39e",
      "ogs:core:benchmark-task:safe-012": "2ba4113ffe26b2fc",
      "ogs:core:benchmark-task:sense-001": "913118356da8150b",
      "ogs:core:benchmark-task:sense-002": "2690028d89b60292",
      "ogs:core:benchmark-task:sense-003": "b09502eb4fd5d61f",
      "ogs:core:benchmark-task:sense-004": "94f009713056d52f",
      "ogs:core:benchmark-task:sense-005": "c950ec9851ad52bf",
      "ogs:core:benchmark-task:sense-006": "d68ef8562addd55d",
      "ogs:core:benchmark-task:sense-007": "658c14cce434cc0a",
      "ogs:core:benchmark-task:sense-008": "873c6c2f9608e43f",
      "ogs:core:benchmark-task:sense-009": "523e04f39252cb4b",
      "ogs:core:benchmark-task:sense-010": "1813cae53ba17a84",
      "ogs:core:benchmark-task:sense-011": "1886b2e009716d1d",
      "ogs:core:benchmark-task:sense-012": "8d40a83f1b501685",
      "ogs:core:benchmark-task:sense-013": "691f59088f3e98f3",
      "ogs:core:benchmark-task:sense-014": "c4a83a79265c0e9f",
      "ogs:core:benchmark-task:sense-015": "66d64ca2254c09ee",
      "ogs:core:benchmark-task:sense-016": "70318c6fd3b033d2",
      "ogs:core:benchmark-task:sense-017": "c3cedde89152ce76",
      "ogs:core:benchmark-task:sense-018": "7d31de11173a7fd2",
      "ogs:core:benchmark-task:sense-019": "2e7df899a0148ebd",
      "ogs:core:benchmark-task:sense-020": "ca5b9ab85fe71acb",
      "ogs:core:benchmark-task:struct-001": "4523d68b7da194f7",
      "ogs:core:benchmark-task:struct-002": "367b58556573440d",
      "ogs:core:benchmark-task:struct-003": "2740dc70d150e12a",
      "ogs:core:benchmark-task:struct-004": "9bd6860a8c5c69ec",
      "ogs:core:benchmark-task:struct-005": "9caf9bc474b2464d",
      "ogs:core:benchmark-task:struct-006": "bb4d0fcee2d1e751",
      "ogs:core:benchmark-task:struct-007": "0bc2e49ee6c1093e",
      "ogs:core:benchmark-task:struct-008": "791ac1beebd95233",
      "ogs:core:benchmark-task:struct-009": "a6a51cef64e43626",
      "ogs:core:benchmark-task:struct-010": "6d4d6d3cc23f3283",
      "ogs:core:benchmark-task:struct-011": "6615664480900680",
      "ogs:core:benchmark-task:struct-012": "bd330891a3e68b59",
      "ogs:core:benchmark-task:struct-013": "872d23f39307a374",
      "ogs:core:benchmark-task:struct-014": "3fe3fe74f8d80260",
      "ogs:core:benchmark-task:struct-015": "f0c01fa20f35eb51",
      "ogs:core:benchmark-task:struct-016": "284b25ae8f0dc3ef",
      "ogs:core:benchmark-task:struct-017": "534d8590759d6dc3",
      "ogs:core:benchmark-task:struct-018": "1470a28b8e9ef028",
      "ogs:core:benchmark-task:struct-019": "aa2726470aa7cfb8",
      "ogs:core:benchmark-task:struct-020": "5b115600e8dad71f",
      "ogs:core:benchmark-task:vocab-001": "e52270d817a9dc7f",
      "ogs:core:benchmark-task:vocab-002": "e2096242953ff882",
      "ogs:core:benchmark-task:vocab-003": "fa5fa32bfe275dfb",
      "ogs:core:benchmark-task:vocab-004": "e0631034b8b8ece4",
      "ogs:core:benchmark-task:vocab-005": "bbbd81bb3c8b8ea4",
      "ogs:core:benchmark-task:vocab-006": "6c6ae6f82f354be3",
      "ogs:core:benchmark-task:vocab-007": "0adac8c06022acf5",
      "ogs:core:benchmark-task:vocab-008": "e58319dc1276cdb2",
      "ogs:core:benchmark-task:vocab-009": "d51864ab27ecfa64",
      "ogs:core:benchmark-task:vocab-010": "1748069ad0ebdde7",
      "ogs:core:benchmark-task:vocab-011": "85de6add5c1ebfba",
      "ogs:core:benchmark-task:vocab-012": "44c9b26dc70c4557",
      "ogs:core:benchmark-task:vocab-013": "dc3e62daf8bdf21a",
      "ogs:core:benchmark-task:vocab-014": "3980456a47e9b7c6",
      "ogs:core:benchmark-task:vocab-015": "ad313a98974d64f7",
      "ogs:core:benchmark-task:vocab-016": "95e41ab82cba029a",
      "ogs:core:benchmark-task:vocab-017": "8070fb687fd2f108",
      "ogs:core:benchmark-task:vocab-018": "9e97b59361dbdf0e",
      "ogs:core:benchmark-task:vocab-019": "fc9a763682f1225b",
      "ogs:core:benchmark-task:vocab-020": "56ad4f9c58bc1eb7",
      "ogs:core:benchmark-task:xplain-001": "a75ad0ee2a385794",
      "ogs:core:benchmark-task:xplain-002": "bdc5d8d74c85e9ab",
      "ogs:core:benchmark-task:xplain-003": "3440bcf3978a1c58",
      "ogs:core:benchmark-task:xplain-004": "b3249dafda966f70",
      "ogs:core:benchmark-task:xplain-005": "0cb2f1946943a0e2",
      "ogs:core:benchmark-task:xplain-006": "9a46b0d141442ce3",
      "ogs:core:benchmark-task:xplain-007": "497adc64f3303eb4",
      "ogs:core:benchmark-task:xplain-008": "2120f96a0444a50e",
      "ogs:core:benchmark-task:xplain-009": "c077c498ceed5f73",
      "ogs:core:benchmark-task:xplain-010": "956c6ce654c2e297",
      "ogs:core:benchmark-task:xplain-011": "1dfeb782059a4814",
      "ogs:core:benchmark-task:xplain-012": "28df36c8ae155697",
      "ogs:core:benchmark-task:xplain-013": "18a20e7e30844c41",
      "ogs:core:benchmark-task:xplain-014": "f9be4ef2853d67b3",
      "ogs:core:benchmark-task:xplain-015": "5474f731e5d9751c",
      "ogs:core:benchmark-task:xplain-016": "fc7f227aebed6756",
      "ogs:core:benchmark-task:xplain-017": "024198497e327bb1",
      "ogs:core:benchmark-task:xplain-018": "283bbb9e703e6e30",
      "ogs:core:benchmark-task:xplain-019": "9ea6cd79384f9ee2",
      "ogs:core:benchmark-task:xplain-020": "43005f44c870684b"
    }
  },
  "tasks": [
    {
      "task_id": "ogs:core:benchmark-task:comp-001",
      "category": "composition",
      "capability": "maillard_index",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.maillard_index"
      ],
      "prompt_template_hash": "5dc05e883ee75cf7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Maillard index (0-5 integer)\n\n- 0: no browning (raw, poached, steamed)\n- 1: pale gold (soft-baked dough, lightly sauteed)\n- 2: golden brown (roasted chicken skin)\n- 3: deep mahogany (well-seared steak crust, duck breast skin)\n- 4: very dark brown with bitter edge (heavy crust, blackened fish)\n- 5: fully charred (intentionally blackened, carbonised)\n\n---\n\nUsing the OGS Maillard index (0 = no browning, 5 = fully charred), return the integer value best describing the described cook. Respond with just the integer.\n\nINPUT:\n{\n  \"description\": \"a duck breast seared skin-side down until the skin is deep mahogany brown and crisp, but not black\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 3,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "3",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 3.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 1,
            "latency_ms": 583.6,
            "usd": 0.000165,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "3",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 3.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 1,
            "latency_ms": 507.4,
            "usd": 0.000165,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "3",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 3.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 1,
            "latency_ms": 709.8,
            "usd": 0.000165,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 409.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 600.267,
        "usd_sum": 0.000495
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-002",
      "category": "composition",
      "capability": "maillard_index",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.maillard_index"
      ],
      "prompt_template_hash": "d5fef8891eee5391",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Maillard index (0-5 integer)\n\n- 0: no browning (raw, poached, steamed)\n- 1: pale gold (soft-baked dough, lightly sauteed)\n- 2: golden brown (roasted chicken skin)\n- 3: deep mahogany (well-seared steak crust, duck breast skin)\n- 4: very dark brown with bitter edge (heavy crust, blackened fish)\n- 5: fully charred (intentionally blackened, carbonised)\n\n---\n\nUsing the OGS Maillard index (0 = no browning, 5 = fully charred), return the integer value for the described cook. Respond with just the integer.\n\nINPUT:\n{\n  \"description\": \"a fresh egg poached in 82 C water for 4 minutes\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 400,
            "output_tokens": 1,
            "latency_ms": 614.8,
            "usd": 0.000162,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 400,
            "output_tokens": 1,
            "latency_ms": 816.9,
            "usd": 0.000162,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 400,
            "output_tokens": 1,
            "latency_ms": 596.2,
            "usd": 0.000162,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 400.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 675.967,
        "usd_sum": 0.000486
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-003",
      "category": "composition",
      "capability": "reduction_level",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.reduction_level"
      ],
      "prompt_template_hash": "7b7db2c313e1d55b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Reduction level (0-5 integer)\n\n- 0: no reduction (raw stock, unreduced liquid)\n- 1: light simmer reduction (~10%)\n- 2: modest reduction (~30%, nappe forming)\n- 3: substantial reduction (~50%, demi-glace territory)\n- 4: heavy reduction (~75%, glace-like, syrupy)\n- 5: near-complete reduction (glace de viande, paste)\n\n---\n\nUsing the OGS reduction level (0 = no reduction, 5 = glace-level), return the integer best describing the sauce. Respond with just the integer.\n\nINPUT:\n{\n  \"description\": \"a veal demi-glace reduced from 4 L of stock down to about 600 mL, thickly coating a spoon with a glossy sheen\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 4,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "3",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 3.0,
            "expected": 4.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 598.9,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "3",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 3.0,
            "expected": 4.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 501.3,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "3",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 3.0,
            "expected": 4.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 608.3,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 398.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 569.5,
        "usd_sum": 0.000483
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-004",
      "category": "composition",
      "capability": "cooking_method_recognition",
      "difficulty": "hard",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "ca8109a73af762fa",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nA piece of black cod is packed in salt-sugar-sake-lees paste and kept refrigerated for 48 hours, then rinsed and grilled over binchotan. What is the single OGS cooking method that best labels the BEFORE-grilling stage? Respond with just the method name.\n\nINPUT:\n{\n  \"description\": \"black cod packed in salt-sugar-sake-lees paste for 48h before grilling\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "cured"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "cured",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cured",
            "expected": "cured",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 432,
            "output_tokens": 2,
            "latency_ms": 534.7,
            "usd": 0.000176,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "cured",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cured",
            "expected": "cured",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 432,
            "output_tokens": 2,
            "latency_ms": 495.6,
            "usd": 0.000176,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "cured",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cured",
            "expected": "cured",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 432,
            "output_tokens": 2,
            "latency_ms": 575.9,
            "usd": 0.000176,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 432.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 535.4,
        "usd_sum": 0.000528
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-005",
      "category": "composition",
      "capability": "component_role_assignment",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "2885728399552fb5",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nFor the described dish element, return the single OGS component role that best applies. Respond with just the role name.\n\nINPUT:\n{\n  \"element\": \"a small handful of thin, deep-fried shallot rings scattered on top of a braised beef rendang primarily for crunchy contrast\"\n}\n\nANSWER FORMAT: one_of:main_protein,main_vegetable,starch,sauce,fat_component,acid_component,aromatic_garnish,seasoning,textural_element",
      "expected": {
        "value": "textural_element"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "textural_element",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "textural_element",
            "expected": "textural_element",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 357,
            "output_tokens": 3,
            "latency_ms": 608.8,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "textural_element",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "textural_element",
            "expected": "textural_element",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 357,
            "output_tokens": 3,
            "latency_ms": 537.9,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "textural_element",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "textural_element",
            "expected": "textural_element",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 357,
            "output_tokens": 3,
            "latency_ms": 780.8,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 357.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 642.5,
        "usd_sum": 0.000444
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-006",
      "category": "composition",
      "capability": "component_set_identification",
      "difficulty": "hard",
      "cuisine_region": "italian",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "bb37bf992b2f6c39",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nList the OGS component ROLES that appear in a classic Roman spaghetti alla carbonara (guanciale, eggs, pecorino romano, spaghetti, black pepper). Respond with a JSON array of role strings from the OGS vocabulary, without duplicates, in any order.\n\nINPUT:\n{\n  \"dish\": \"classic Roman spaghetti alla carbonara\",\n  \"components_present\": [\n    \"spaghetti\",\n    \"guanciale\",\n    \"egg yolks\",\n    \"pecorino romano\",\n    \"black pepper\"\n  ]\n}\n\nANSWER FORMAT: json\n\nRESPONSE JSON SCHEMA:\n{\n  \"type\": \"array\",\n  \"items\": {\n    \"type\": \"string\",\n    \"enum\": [\n      \"main_protein\",\n      \"main_vegetable\",\n      \"starch\",\n      \"sauce\",\n      \"fat_component\",\n      \"acid_component\",\n      \"aromatic_garnish\",\n      \"seasoning\",\n      \"textural_element\"\n    ]\n  },\n  \"uniqueItems\": true\n}",
      "expected": {
        "values": [
          "starch",
          "main_protein",
          "fat_component",
          "sauce",
          "seasoning"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"main_protein\",\"starch\",\"fat_component\",\"sauce\",\"seasoning\"]",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": [
              "fat_component",
              "main_protein",
              "sauce",
              "seasoning",
              "starch"
            ],
            "expected": [
              "fat_component",
              "main_protein",
              "sauce",
              "seasoning",
              "starch"
            ],
            "precision": 1.0,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 468,
            "output_tokens": 17,
            "latency_ms": 807.7,
            "usd": 0.000214,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"main_protein\",\n  \"starch\",\n  \"fat_component\",\n  \"seasoning\",\n  \"sauce\"\n]",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": [
              "fat_component",
              "main_protein",
              "sauce",
              "seasoning",
              "starch"
            ],
            "expected": [
              "fat_component",
              "main_protein",
              "sauce",
              "seasoning",
              "starch"
            ],
            "precision": 1.0,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 468,
            "output_tokens": 28,
            "latency_ms": 788.7,
            "usd": 0.000232,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"main_protein\",\n  \"starch\",\n  \"fat_component\",\n  \"seasoning\",\n  \"sauce\"\n]",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": [
              "fat_component",
              "main_protein",
              "sauce",
              "seasoning",
              "starch"
            ],
            "expected": [
              "fat_component",
              "main_protein",
              "sauce",
              "seasoning",
              "starch"
            ],
            "precision": 1.0,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 468,
            "output_tokens": 28,
            "latency_ms": 884.0,
            "usd": 0.000232,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 468.0,
        "output_tokens_mean": 24.333,
        "latency_ms_mean": 826.8,
        "usd_sum": 0.000678
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-007",
      "category": "composition",
      "capability": "service_temperature",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.service_temperatures"
      ],
      "prompt_template_hash": "59ae41132f38bbfd",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Service temperatures\n\nfrozen, cold, cool, room, warm, hot, very_hot\n\n---\n\nReturn the single OGS service_temperature value that matches the traditional service of the dish below. Respond with just the value.\n\nINPUT:\n{\n  \"dish\": \"classic French vichyssoise (leek and potato soup)\"\n}\n\nANSWER FORMAT: one_of:frozen,cold,cool,room,warm,hot,very_hot",
      "expected": {
        "value": "cold"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 313,
            "output_tokens": 1,
            "latency_ms": 575.8,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 313,
            "output_tokens": 1,
            "latency_ms": 478.6,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 313,
            "output_tokens": 1,
            "latency_ms": 717.7,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 313.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 590.7,
        "usd_sum": 0.000381
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-008",
      "category": "composition",
      "capability": "maillard_index",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.maillard_index"
      ],
      "prompt_template_hash": "5121ce4bf46aa4d5",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Maillard index (0-5 integer)\n\n- 0: no browning (raw, poached, steamed)\n- 1: pale gold (soft-baked dough, lightly sauteed)\n- 2: golden brown (roasted chicken skin)\n- 3: deep mahogany (well-seared steak crust, duck breast skin)\n- 4: very dark brown with bitter edge (heavy crust, blackened fish)\n- 5: fully charred (intentionally blackened, carbonised)\n\n---\n\nUsing the OGS Maillard index (0 = no browning, 5 = fully charred), return the integer for a steamed dim sum dumpling wrapper.\n\nINPUT:\n{\n  \"description\": \"translucent wheat-starch wrapper on a har gow, bamboo-steamed\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 589.3,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 511.9,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 374.5,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 398.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 491.9,
        "usd_sum": 0.000483
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-009",
      "category": "composition",
      "capability": "maillard_index",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.maillard_index"
      ],
      "prompt_template_hash": "2a8a86bb58372ed7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Maillard index (0-5 integer)\n\n- 0: no browning (raw, poached, steamed)\n- 1: pale gold (soft-baked dough, lightly sauteed)\n- 2: golden brown (roasted chicken skin)\n- 3: deep mahogany (well-seared steak crust, duck breast skin)\n- 4: very dark brown with bitter edge (heavy crust, blackened fish)\n- 5: fully charred (intentionally blackened, carbonised)\n\n---\n\nUsing the OGS Maillard index (0-5 integer), return the value for a chicken breast poached in stock at 82°C with no prior searing.\n\nINPUT:\n{\n  \"description\": \"chicken breast poached in 82°C stock, never seared\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 0,
        "tolerance": 0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 570.6,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 581.7,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 0.0,
            "expected": 0.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 707.3,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 398.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 619.867,
        "usd_sum": 0.000483
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-010",
      "category": "composition",
      "capability": "maillard_index",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.maillard_index"
      ],
      "prompt_template_hash": "d420afcaea603a0e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Maillard index (0-5 integer)\n\n- 0: no browning (raw, poached, steamed)\n- 1: pale gold (soft-baked dough, lightly sauteed)\n- 2: golden brown (roasted chicken skin)\n- 3: deep mahogany (well-seared steak crust, duck breast skin)\n- 4: very dark brown with bitter edge (heavy crust, blackened fish)\n- 5: fully charred (intentionally blackened, carbonised)\n\n---\n\nUsing the OGS Maillard index, return the integer for a scallop seared in hot butter until a deep golden crust forms on one side.\n\nINPUT:\n{\n  \"description\": \"diver scallop seared in foaming butter, one side deep golden, interior translucent\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 3,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "2",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 2.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 531.8,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "2",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 2.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 501.6,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "2",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 2.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 398,
            "output_tokens": 1,
            "latency_ms": 485.7,
            "usd": 0.000161,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 398.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 506.367,
        "usd_sum": 0.000483
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-011",
      "category": "composition",
      "capability": "reduction_level",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.reduction_level"
      ],
      "prompt_template_hash": "1d18b06eac9f3ddc",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Reduction level (0-5 integer)\n\n- 0: no reduction (raw stock, unreduced liquid)\n- 1: light simmer reduction (~10%)\n- 2: modest reduction (~30%, nappe forming)\n- 3: substantial reduction (~50%, demi-glace territory)\n- 4: heavy reduction (~75%, glace-like, syrupy)\n- 5: near-complete reduction (glace de viande, paste)\n\n---\n\nUsing the OGS reduction level (0-5 integer), return the value for a pan sauce reduced until it coats the back of a spoon (nappe).\n\nINPUT:\n{\n  \"description\": \"red wine pan sauce reduced until it coats the back of a spoon without running\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 3,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "2",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 2.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 383,
            "output_tokens": 1,
            "latency_ms": 599.2,
            "usd": 0.000155,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "2",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 2.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 383,
            "output_tokens": 1,
            "latency_ms": 397.7,
            "usd": 0.000155,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "2",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 2.0,
            "expected": 3.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 383,
            "output_tokens": 1,
            "latency_ms": 471.7,
            "usd": 0.000155,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 383.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 489.533,
        "usd_sum": 0.000465
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-012",
      "category": "composition",
      "capability": "reduction_level",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.reduction_level"
      ],
      "prompt_template_hash": "fa91f74e86a4cffb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Reduction level (0-5 integer)\n\n- 0: no reduction (raw stock, unreduced liquid)\n- 1: light simmer reduction (~10%)\n- 2: modest reduction (~30%, nappe forming)\n- 3: substantial reduction (~50%, demi-glace territory)\n- 4: heavy reduction (~75%, glace-like, syrupy)\n- 5: near-complete reduction (glace de viande, paste)\n\n---\n\nUsing the OGS reduction level, return the integer for glace de viande (gelatinous concentrate that sets firmly at room temperature).\n\nINPUT:\n{\n  \"description\": \"glace de viande, reduced from 10 L brown stock to ~500 mL\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 5,
        "tolerance": 0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 5.0,
            "expected": 5.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 379,
            "output_tokens": 1,
            "latency_ms": 375.1,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 5.0,
            "expected": 5.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 379,
            "output_tokens": 1,
            "latency_ms": 1332.6,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 5.0,
            "expected": 5.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 379,
            "output_tokens": 1,
            "latency_ms": 574.2,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 379.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 760.633,
        "usd_sum": 0.000459
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-013",
      "category": "composition",
      "capability": "cooking_method_recognition",
      "difficulty": "medium",
      "cuisine_region": "korean",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "f8257062be99b5d3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nReturn the single OGS cooking method that best fits the main cooking step for bossam pork belly (~90 min at low simmer in aromatic water).\n\nINPUT:\n{\n  \"description\": \"pork belly gently simmered for 90 minutes in water with ginger, doenjang, and pepper until fork-tender\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "poached"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "poached",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "poached",
            "expected": "poached",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 2,
            "latency_ms": 617.3,
            "usd": 0.000167,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "poached",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "poached",
            "expected": "poached",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 2,
            "latency_ms": 546.7,
            "usd": 0.000167,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "poached",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "poached",
            "expected": "poached",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 2,
            "latency_ms": 354.7,
            "usd": 0.000167,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 409.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 506.233,
        "usd_sum": 0.000501
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-014",
      "category": "composition",
      "capability": "component_role_assignment",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "114b0796c10cf141",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nReturn the single OGS component role that best fits a sprinkle of gremolata (parsley, garlic, lemon zest) finishing an osso buco at service.\n\nINPUT:\n{\n  \"description\": \"gremolata finishing an osso buco milanese at service\"\n}\n\nANSWER FORMAT: one_of:main_protein,main_vegetable,starch,sauce,fat_component,acid_component,aromatic_garnish,seasoning,textural_element",
      "expected": {
        "value": "aromatic_garnish"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "aromatic_garnish",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "aromatic_garnish",
            "expected": "aromatic_garnish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 355,
            "output_tokens": 5,
            "latency_ms": 546.7,
            "usd": 0.00015,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "aromatic_garnish",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "aromatic_garnish",
            "expected": "aromatic_garnish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 355,
            "output_tokens": 5,
            "latency_ms": 959.1,
            "usd": 0.00015,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "aromatic_garnish",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "aromatic_garnish",
            "expected": "aromatic_garnish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 355,
            "output_tokens": 5,
            "latency_ms": 611.6,
            "usd": 0.00015,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 355.0,
        "output_tokens_mean": 5.0,
        "latency_ms_mean": 705.8,
        "usd_sum": 0.00045
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-015",
      "category": "composition",
      "capability": "component_set_identification",
      "difficulty": "hard",
      "cuisine_region": "thai",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "07976f0ee97244e5",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nList the OGS component roles that appear in a classic Pad Thai (rice noodles, shrimp, tofu, egg, tamarind-palm sugar-fish sauce, bean sprouts, crushed peanuts, chives). Return a JSON array of role names.\n\nINPUT:\n{\n  \"dish\": \"classic Pad Thai\"\n}\n\nANSWER FORMAT: set_of_roles",
      "expected": {
        "values": [
          "starch",
          "main_protein",
          "acid_component",
          "seasoning",
          "aromatic_garnish",
          "textural_element",
          "fat_component"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"main_protein\",\"starch\",\"sauce\",\"main_vegetable\",\"fat_component\",\"acid_component\",\"textural_element\",\"aromatic_garnish\"]",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "acid_component",
              "aromatic_garnish",
              "fat_component",
              "main_protein",
              "main_vegetable",
              "sauce",
              "starch",
              "textural_element"
            ],
            "expected": [
              "acid_component",
              "aromatic_garnish",
              "fat_component",
              "main_protein",
              "seasoning",
              "starch",
              "textural_element"
            ],
            "precision": 0.75,
            "recall": 0.8571
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 32,
            "latency_ms": 1073.5,
            "usd": 0.000183,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"starch\",\"main_protein\",\"main_vegetable\",\"fat_component\",\"acid_component\",\"textural_element\",\"aromatic_garnish\"]",
          "compliant": true,
          "score": 0.8571,
          "detail": {
            "predicted": [
              "acid_component",
              "aromatic_garnish",
              "fat_component",
              "main_protein",
              "main_vegetable",
              "starch",
              "textural_element"
            ],
            "expected": [
              "acid_component",
              "aromatic_garnish",
              "fat_component",
              "main_protein",
              "seasoning",
              "starch",
              "textural_element"
            ],
            "precision": 0.8571,
            "recall": 0.8571
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 29,
            "latency_ms": 834.0,
            "usd": 0.000178,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"main_protein\",\"starch\",\"sauce\",\"main_vegetable\",\"textural_element\",\"aromatic_garnish\"]",
          "compliant": true,
          "score": 0.6154,
          "detail": {
            "predicted": [
              "aromatic_garnish",
              "main_protein",
              "main_vegetable",
              "sauce",
              "starch",
              "textural_element"
            ],
            "expected": [
              "acid_component",
              "aromatic_garnish",
              "fat_component",
              "main_protein",
              "seasoning",
              "starch",
              "textural_element"
            ],
            "precision": 0.6667,
            "recall": 0.5714
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 26,
            "latency_ms": 751.9,
            "usd": 0.000173,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.7575,
      "score_summary": {
        "mean": 0.7575,
        "stdev": 0.1263,
        "sem": 0.0729,
        "ci95_lo": 0.6145,
        "ci95_hi": 0.9005,
        "n": 3
      },
      "score_if_valid": 0.7575,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 329.0,
        "output_tokens_mean": 29.0,
        "latency_ms_mean": 886.467,
        "usd_sum": 0.000534
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-016",
      "category": "composition",
      "capability": "service_temperature",
      "difficulty": "easy",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.service_temperatures"
      ],
      "prompt_template_hash": "609a0643399a8df0",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Service temperatures\n\nfrozen, cold, cool, room, warm, hot, very_hot\n\n---\n\nReturn the single OGS service_temperature value for a Spanish ajo blanco (chilled almond-garlic soup).\n\nINPUT:\n{\n  \"dish\": \"ajo blanco, chilled Andalusian almond and garlic soup\"\n}\n\nANSWER FORMAT: one_of:frozen,cold,cool,room,warm,hot,very_hot",
      "expected": {
        "value": "cold"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 309,
            "output_tokens": 1,
            "latency_ms": 555.4,
            "usd": 0.000125,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 309,
            "output_tokens": 1,
            "latency_ms": 624.8,
            "usd": 0.000125,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 309,
            "output_tokens": 1,
            "latency_ms": 523.9,
            "usd": 0.000125,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 309.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 568.033,
        "usd_sum": 0.000375
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-017",
      "category": "composition",
      "capability": "cooking_method_recognition",
      "difficulty": "medium",
      "cuisine_region": "chinese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "09a6e47eda264ded",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nReturn the single OGS cooking method that best describes the signature crisp skin of Peking duck (air-dried then hung-roasted in a wood-fired oven).\n\nINPUT:\n{\n  \"description\": \"Peking duck hung-roasted in a jujube-wood oven at ~230°C until the skin is lacquered and crisp\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "roasted"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 414,
            "output_tokens": 2,
            "latency_ms": 552.6,
            "usd": 0.000169,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 414,
            "output_tokens": 2,
            "latency_ms": 545.9,
            "usd": 0.000169,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 414,
            "output_tokens": 2,
            "latency_ms": 534.0,
            "usd": 0.000169,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 414.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 544.167,
        "usd_sum": 0.000507
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-018",
      "category": "composition",
      "capability": "component_role_assignment",
      "difficulty": "medium",
      "cuisine_region": "levantine",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "32d4a921f4fbdd98",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nReturn the OGS component role that best fits tahini as an ingredient in a classic hummus recipe.\n\nINPUT:\n{\n  \"description\": \"25% tahini by weight blended into chickpea puree to make hummus\"\n}\n\nANSWER FORMAT: one_of:main_protein,main_vegetable,starch,sauce,fat_component,acid_component,aromatic_garnish,seasoning,textural_element",
      "expected": {
        "value": "fat_component"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "fat_component",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fat_component",
            "expected": "fat_component",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 343,
            "output_tokens": 2,
            "latency_ms": 429.8,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "fat_component",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fat_component",
            "expected": "fat_component",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 343,
            "output_tokens": 2,
            "latency_ms": 419.0,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "fat_component",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fat_component",
            "expected": "fat_component",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 343,
            "output_tokens": 2,
            "latency_ms": 492.4,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 343.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 447.067,
        "usd_sum": 0.00042
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-019",
      "category": "composition",
      "capability": "service_temperature",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.service_temperatures"
      ],
      "prompt_template_hash": "eaa27377a264e2e2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Service temperatures\n\nfrozen, cold, cool, room, warm, hot, very_hot\n\n---\n\nReturn the OGS service_temperature for a stirred-over-ice Negroni served in a rocks glass with a large cube.\n\nINPUT:\n{\n  \"dish\": \"stirred Negroni served over a single large ice cube\"\n}\n\nANSWER FORMAT: one_of:frozen,cold,cool,room,warm,hot,very_hot",
      "expected": {
        "value": "cold"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "cool",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "cool",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 314,
            "output_tokens": 1,
            "latency_ms": 541.6,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "cold",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cold",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 314,
            "output_tokens": 1,
            "latency_ms": 582.1,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "cool",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "cool",
            "expected": "cold",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 314,
            "output_tokens": 1,
            "latency_ms": 504.4,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 314.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 542.7,
        "usd_sum": 0.000381
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:comp-020",
      "category": "composition",
      "capability": "maillard_index",
      "difficulty": "hard",
      "cuisine_region": "american",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.maillard_index"
      ],
      "prompt_template_hash": "535bf59391539a15",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Maillard index (0-5 integer)\n\n- 0: no browning (raw, poached, steamed)\n- 1: pale gold (soft-baked dough, lightly sauteed)\n- 2: golden brown (roasted chicken skin)\n- 3: deep mahogany (well-seared steak crust, duck breast skin)\n- 4: very dark brown with bitter edge (heavy crust, blackened fish)\n- 5: fully charred (intentionally blackened, carbonised)\n\n---\n\nUsing the OGS Maillard index, return the integer for blackened redfish cooked in a smoking-hot cast iron skillet with Cajun spice rub.\n\nINPUT:\n{\n  \"description\": \"redfish fillet crusted with spice and cooked in a 300°C dry cast-iron pan until the surface is blackened and slightly bitter\"\n}\n\nANSWER FORMAT: integer_0_5",
      "expected": {
        "value": 4,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "4",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 4.0,
            "expected": 4.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 1,
            "latency_ms": 896.5,
            "usd": 0.000165,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "4",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 4.0,
            "expected": 4.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 1,
            "latency_ms": 517.8,
            "usd": 0.000165,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "4",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 4.0,
            "expected": 4.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 409,
            "output_tokens": 1,
            "latency_ms": 488.7,
            "usd": 0.000165,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 409.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 634.333,
        "usd_sum": 0.000495
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-001",
      "category": "dietary",
      "capability": "vegan_compatibility",
      "difficulty": "easy",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "18b5c9c7c958a521",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nIs a traditional Bolognese tagliatelle al ragù (tagliatelle with ragù made from beef, pancetta, soffritto, tomato, red wine, whole milk, and butter) compatible with a strict vegan diet? Answer with a single word: yes or no.\n\nINPUT:\n{\n  \"dish\": \"tagliatelle al ragù\",\n  \"diet\": \"vegan\"\n}\n\nANSWER FORMAT: exact:yes|no",
      "expected": {
        "value": "no"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "no",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "no",
            "expected": "no",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 911,
            "output_tokens": 1,
            "latency_ms": 521.8,
            "usd": 0.000366,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "no",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "no",
            "expected": "no",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 911,
            "output_tokens": 1,
            "latency_ms": 392.2,
            "usd": 0.000366,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "no",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "no",
            "expected": "no",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 911,
            "output_tokens": 1,
            "latency_ms": 647.5,
            "usd": 0.000366,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 911.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 520.5,
        "usd_sum": 0.001098
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-002",
      "category": "dietary",
      "capability": "gluten_free_violation",
      "difficulty": "easy",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "04840e46f80ccb5b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nA kitchen claims this stir-fry is gluten-free: rice noodles, tofu, shiitake, broccoli, garlic, ginger, regular Japanese soy sauce, sesame oil, and scallions. Return the single ingredient that breaks the claim.\n\nINPUT:\n{\n  \"dish\": \"stir fry\",\n  \"ingredients\": [\n    \"rice noodles\",\n    \"tofu\",\n    \"shiitake\",\n    \"broccoli\",\n    \"garlic\",\n    \"ginger\",\n    \"regular Japanese soy sauce\",\n    \"sesame oil\",\n    \"scallions\"\n  ]\n}\n\nANSWER FORMAT: exact:string",
      "expected": {
        "value": "regular Japanese soy sauce",
        "aliases": [
          "soy sauce",
          "regular soy sauce"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "regular Japanese soy sauce",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "regular japanese soy sauce",
            "expected": "regular japanese soy sauce",
            "aliases": [
              "soy sauce",
              "regular soy sauce"
            ]
          },
          "metrics": {
            "input_tokens": 939,
            "output_tokens": 4,
            "latency_ms": 692.4,
            "usd": 0.000382,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "regular Japanese soy sauce",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "regular japanese soy sauce",
            "expected": "regular japanese soy sauce",
            "aliases": [
              "soy sauce",
              "regular soy sauce"
            ]
          },
          "metrics": {
            "input_tokens": 939,
            "output_tokens": 4,
            "latency_ms": 464.0,
            "usd": 0.000382,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "regular Japanese soy sauce",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "regular japanese soy sauce",
            "expected": "regular japanese soy sauce",
            "aliases": [
              "soy sauce",
              "regular soy sauce"
            ]
          },
          "metrics": {
            "input_tokens": 939,
            "output_tokens": 4,
            "latency_ms": 568.1,
            "usd": 0.000382,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 939.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 574.833,
        "usd_sum": 0.001146
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-003",
      "category": "dietary",
      "capability": "halal_compliance",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "013d6295fee472b5",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nIs a classic panna cotta (cream, sugar, vanilla, leaf gelatine) halal? Answer with yes or no.\n\nINPUT:\n{\n  \"dish\": \"panna cotta\",\n  \"ingredients\": [\n    \"cream\",\n    \"sugar\",\n    \"vanilla\",\n    \"leaf gelatine\"\n  ]\n}\n\nANSWER FORMAT: exact:yes|no",
      "expected": {
        "value": "no"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:no",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:no",
            "expected": "no",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 891,
            "output_tokens": 2,
            "latency_ms": 609.4,
            "usd": 0.00036,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:no",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:no",
            "expected": "no",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 891,
            "output_tokens": 2,
            "latency_ms": 934.8,
            "usd": 0.00036,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:no",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:no",
            "expected": "no",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 891,
            "output_tokens": 2,
            "latency_ms": 631.6,
            "usd": 0.00036,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 891.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 725.267,
        "usd_sum": 0.00108
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-004",
      "category": "dietary",
      "capability": "kosher_meat_dairy",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "85fc94958c4357ef",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nChicken parmigiana is breaded chicken breast topped with tomato sauce and melted mozzarella, served with a side of cream-rich garlic mashed potatoes. From a kosher perspective, what is the core violation? Respond with one short phrase.\n\nINPUT:\n{\n  \"dish\": \"chicken parmigiana with creamy mash\"\n}\n\nANSWER FORMAT: exact:string",
      "expected": {
        "value": "mixing meat and dairy",
        "aliases": [
          "meat and dairy mixed",
          "basar b'chalav",
          "poultry with dairy",
          "meat with dairy"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "mixing meat and dairy",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "mixing meat and dairy",
            "expected": "mixing meat and dairy",
            "aliases": [
              "meat and dairy mixed",
              "basar b'chalav",
              "poultry with dairy",
              "meat with dairy"
            ]
          },
          "metrics": {
            "input_tokens": 885,
            "output_tokens": 5,
            "latency_ms": 431.1,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "mixing meat and dairy",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "mixing meat and dairy",
            "expected": "mixing meat and dairy",
            "aliases": [
              "meat and dairy mixed",
              "basar b'chalav",
              "poultry with dairy",
              "meat with dairy"
            ]
          },
          "metrics": {
            "input_tokens": 885,
            "output_tokens": 5,
            "latency_ms": 741.2,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "mixing meat and dairy",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "mixing meat and dairy",
            "expected": "mixing meat and dairy",
            "aliases": [
              "meat and dairy mixed",
              "basar b'chalav",
              "poultry with dairy",
              "meat with dairy"
            ]
          },
          "metrics": {
            "input_tokens": 885,
            "output_tokens": 5,
            "latency_ms": 630.6,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 885.0,
        "output_tokens_mean": 5.0,
        "latency_ms_mean": 600.967,
        "usd_sum": 0.001086
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-005",
      "category": "dietary",
      "capability": "allergen_detection",
      "difficulty": "easy",
      "cuisine_region": "italian",
      "scorer": "set_f1",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "f40f4c25a2ee4b71",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nFor a dish of pesto genovese made from basil, garlic, pine nuts, parmesan, pecorino, and olive oil, list the EU Annex II allergens that must be declared. Return a comma-separated list using the OGS allergen IDs.\n\nINPUT:\n{\n  \"dish\": \"pesto genovese\",\n  \"ingredients\": [\n    \"basil\",\n    \"garlic\",\n    \"pine nuts\",\n    \"parmesan\",\n    \"pecorino\",\n    \"olive oil\"\n  ]\n}\n\nANSWER FORMAT: set:comma-separated OGS allergen IDs",
      "expected": {
        "values": [
          "tree_nuts",
          "milk"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "set:cereals_containing_gluten,milk,tree_nuts",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "milk",
              "set:cereals_containing_gluten",
              "tree_nuts"
            ],
            "expected": [
              "milk",
              "tree_nuts"
            ],
            "precision": 0.6667,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 934,
            "output_tokens": 14,
            "latency_ms": 875.3,
            "usd": 0.000396,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:cereals_containing_gluten,milk,tree_nuts",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "milk",
              "set:cereals_containing_gluten",
              "tree_nuts"
            ],
            "expected": [
              "milk",
              "tree_nuts"
            ],
            "precision": 0.6667,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 934,
            "output_tokens": 14,
            "latency_ms": 717.6,
            "usd": 0.000396,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:cereals_containing_gluten,milk,tree_nuts",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "milk",
              "set:cereals_containing_gluten",
              "tree_nuts"
            ],
            "expected": [
              "milk",
              "tree_nuts"
            ],
            "precision": 0.6667,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 934,
            "output_tokens": 14,
            "latency_ms": 651.6,
            "usd": 0.000396,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8,
      "score_summary": {
        "mean": 0.8,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.8,
        "ci95_hi": 0.8,
        "n": 3
      },
      "score_if_valid": 0.8,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 934.0,
        "output_tokens_mean": 14.0,
        "latency_ms_mean": 748.167,
        "usd_sum": 0.001188
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-006",
      "category": "dietary",
      "capability": "allergen_detection",
      "difficulty": "medium",
      "cuisine_region": "thai",
      "scorer": "set_f1",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "7f45e388301a68a5",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nReturn the EU Annex II allergens that must be declared for a standard pad thai (rice noodles, shrimp, egg, tamarind, fish sauce, palm sugar, roasted peanuts, bean sprouts, garlic chives, lime, chili).\n\nINPUT:\n{\n  \"dish\": \"pad thai\"\n}\n\nANSWER FORMAT: set:comma-separated OGS allergen IDs",
      "expected": {
        "values": [
          "crustaceans",
          "eggs",
          "fish",
          "peanuts"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "cereals_containing_gluten,crustaceans,eggs,fish,peanuts,soybeans",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "cereals_containing_gluten",
              "crustaceans",
              "eggs",
              "fish",
              "peanuts",
              "soybeans"
            ],
            "expected": [
              "crustaceans",
              "eggs",
              "fish",
              "peanuts"
            ],
            "precision": 0.6667,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 888,
            "output_tokens": 23,
            "latency_ms": 703.0,
            "usd": 0.000392,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:crustaceans,eggs,fish,peanuts",
          "compliant": true,
          "score": 0.75,
          "detail": {
            "predicted": [
              "eggs",
              "fish",
              "peanuts",
              "set:crustaceans"
            ],
            "expected": [
              "crustaceans",
              "eggs",
              "fish",
              "peanuts"
            ],
            "precision": 0.75,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 888,
            "output_tokens": 14,
            "latency_ms": 756.8,
            "usd": 0.000378,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:cereals_containing_gluten,crustaceans,eggs,fish,peanuts,soybeans",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "crustaceans",
              "eggs",
              "fish",
              "peanuts",
              "set:cereals_containing_gluten",
              "soybeans"
            ],
            "expected": [
              "crustaceans",
              "eggs",
              "fish",
              "peanuts"
            ],
            "precision": 0.6667,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 888,
            "output_tokens": 24,
            "latency_ms": 667.1,
            "usd": 0.000394,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.7833,
      "score_summary": {
        "mean": 0.7833,
        "stdev": 0.0289,
        "sem": 0.0167,
        "ci95_lo": 0.7507,
        "ci95_hi": 0.816,
        "n": 3
      },
      "score_if_valid": 0.7833,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 888.0,
        "output_tokens_mean": 20.333,
        "latency_ms_mean": 708.967,
        "usd_sum": 0.001164
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-007",
      "category": "dietary",
      "capability": "vegan_substitution",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "eb092a2c162960f6",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nName a single plant-based ingredient commonly used to emulsify and thicken a vegan carbonara sauce in place of eggs, while also delivering umami. Respond with one short ingredient name.\n\nINPUT:\n{\n  \"target_role\": \"egg emulsifier and umami in vegan carbonara\"\n}\n\nANSWER FORMAT: exact:string",
      "expected": {
        "value": "silken tofu",
        "aliases": [
          "silken tofu blended with nutritional yeast",
          "soft tofu"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "nutritional_yeast",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "nutritional_yeast",
            "expected": "silken tofu",
            "aliases": [
              "silken tofu blended with nutritional yeast",
              "soft tofu"
            ]
          },
          "metrics": {
            "input_tokens": 880,
            "output_tokens": 6,
            "latency_ms": 641.8,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "nutritional_yeast",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "nutritional_yeast",
            "expected": "silken tofu",
            "aliases": [
              "silken tofu blended with nutritional yeast",
              "soft tofu"
            ]
          },
          "metrics": {
            "input_tokens": 880,
            "output_tokens": 6,
            "latency_ms": 716.7,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "nutritional_yeast",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "nutritional_yeast",
            "expected": "silken tofu",
            "aliases": [
              "silken tofu blended with nutritional yeast",
              "soft tofu"
            ]
          },
          "metrics": {
            "input_tokens": 880,
            "output_tokens": 6,
            "latency_ms": 449.1,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 880.0,
        "output_tokens_mean": 6.0,
        "latency_ms_mean": 602.533,
        "usd_sum": 0.001086
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-008",
      "category": "dietary",
      "capability": "gluten_free_substitution",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "480be99b3f35a51d",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nSuggest a single widely-used gluten-free flour to make a béchamel roux that maintains classic texture. Respond with one word.\n\nINPUT:\n{\n  \"target_role\": \"béchamel roux, gluten-free\"\n}\n\nANSWER FORMAT: exact:string",
      "expected": {
        "value": "rice flour",
        "aliases": [
          "cornstarch",
          "corn starch",
          "cornflour",
          "tapioca starch",
          "tapioca flour"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "rice",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "rice",
            "expected": "rice flour",
            "aliases": [
              "cornstarch",
              "corn starch",
              "cornflour",
              "tapioca starch",
              "tapioca flour"
            ]
          },
          "metrics": {
            "input_tokens": 867,
            "output_tokens": 1,
            "latency_ms": 596.2,
            "usd": 0.000348,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "rice",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "rice",
            "expected": "rice flour",
            "aliases": [
              "cornstarch",
              "corn starch",
              "cornflour",
              "tapioca starch",
              "tapioca flour"
            ]
          },
          "metrics": {
            "input_tokens": 867,
            "output_tokens": 1,
            "latency_ms": 508.8,
            "usd": 0.000348,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "rice",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "rice",
            "expected": "rice flour",
            "aliases": [
              "cornstarch",
              "corn starch",
              "cornflour",
              "tapioca starch",
              "tapioca flour"
            ]
          },
          "metrics": {
            "input_tokens": 867,
            "output_tokens": 1,
            "latency_ms": 549.7,
            "usd": 0.000348,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 867.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 551.567,
        "usd_sum": 0.001044
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-009",
      "category": "dietary",
      "capability": "vegan_compatibility",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "set_f1",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "936752891554dba3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nFrom this list, return every item that is NOT vegan as a comma-separated list: honey, agave nectar, wine clarified with isinglass, wine clarified with bentonite, carmine, nutritional yeast, shellac, maple syrup, worcestershire sauce (traditional), vegetable glycerin.\n\nINPUT:\n{\n  \"items\": [\n    \"honey\",\n    \"agave nectar\",\n    \"wine clarified with isinglass\",\n    \"wine clarified with bentonite\",\n    \"carmine\",\n    \"nutritional yeast\",\n    \"shellac\",\n    \"maple syrup\",\n    \"worcestershire sauce (traditional)\",\n    \"vegetable glycerin\"\n  ]\n}\n\nANSWER FORMAT: set:comma-separated item names",
      "expected": {
        "values": [
          "honey",
          "wine clarified with isinglass",
          "carmine",
          "shellac",
          "worcestershire sauce (traditional)"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "set:honey,wine clarified with isinglass,carmine,shellac,worcestershire sauce (traditional)",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "carmine",
              "set:honey",
              "shellac",
              "wine clarified with isinglass",
              "worcestershire sauce (traditional)"
            ],
            "expected": [
              "carmine",
              "honey",
              "shellac",
              "wine clarified with isinglass",
              "worcestershire sauce (traditional)"
            ],
            "precision": 0.8,
            "recall": 0.8
          },
          "metrics": {
            "input_tokens": 964,
            "output_tokens": 23,
            "latency_ms": 652.2,
            "usd": 0.000422,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:honey,wine clarified with isinglass,carmine,shellac,worcestershire sauce (traditional)",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "carmine",
              "set:honey",
              "shellac",
              "wine clarified with isinglass",
              "worcestershire sauce (traditional)"
            ],
            "expected": [
              "carmine",
              "honey",
              "shellac",
              "wine clarified with isinglass",
              "worcestershire sauce (traditional)"
            ],
            "precision": 0.8,
            "recall": 0.8
          },
          "metrics": {
            "input_tokens": 964,
            "output_tokens": 23,
            "latency_ms": 767.1,
            "usd": 0.000422,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:honey,wine clarified with isinglass,carmine,shellac,worcestershire sauce (traditional)",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "carmine",
              "set:honey",
              "shellac",
              "wine clarified with isinglass",
              "worcestershire sauce (traditional)"
            ],
            "expected": [
              "carmine",
              "honey",
              "shellac",
              "wine clarified with isinglass",
              "worcestershire sauce (traditional)"
            ],
            "precision": 0.8,
            "recall": 0.8
          },
          "metrics": {
            "input_tokens": 964,
            "output_tokens": 23,
            "latency_ms": 725.2,
            "usd": 0.000422,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8,
      "score_summary": {
        "mean": 0.8,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.8,
        "ci95_hi": 0.8,
        "n": 3
      },
      "score_if_valid": 0.8,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 964.0,
        "output_tokens_mean": 23.0,
        "latency_ms_mean": 714.833,
        "usd_sum": 0.001266
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-010",
      "category": "dietary",
      "capability": "cross_contact_risk",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "set_f1",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "35c29846f76046d7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. 
Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nA kitchen fries breaded calamari, battered cod, and french fries in a single deep fryer. Return the EU allergens that a customer ordering 'fries only' must be warned about as a comma-separated OGS allergen ID list.\n\nINPUT:\n{\n  \"scenario\": \"single shared fryer\",\n  \"items\": [\n    \"breaded calamari\",\n    \"battered cod\",\n    \"french fries\"\n  ]\n}\n\nANSWER FORMAT: set:comma-separated OGS allergen IDs",
      "expected": {
        "values": [
          "molluscs",
          "fish",
          "cereals_containing_gluten"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "set:cereals_containing_gluten,crustaceans,fish,molluscs",
          "compliant": true,
          "score": 0.5714,
          "detail": {
            "predicted": [
              "crustaceans",
              "fish",
              "molluscs",
              "set:cereals_containing_gluten"
            ],
            "expected": [
              "cereals_containing_gluten",
              "fish",
              "molluscs"
            ],
            "precision": 0.5,
            "recall": 0.6667
          },
          "metrics": {
            "input_tokens": 916,
            "output_tokens": 19,
            "latency_ms": 1034.1,
            "usd": 0.000397,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:cereals_containing_gluten,crustaceans,fish,molluscs",
          "compliant": true,
          "score": 0.5714,
          "detail": {
            "predicted": [
              "crustaceans",
              "fish",
              "molluscs",
              "set:cereals_containing_gluten"
            ],
            "expected": [
              "cereals_containing_gluten",
              "fish",
              "molluscs"
            ],
            "precision": 0.5,
            "recall": 0.6667
          },
          "metrics": {
            "input_tokens": 916,
            "output_tokens": 19,
            "latency_ms": 704.1,
            "usd": 0.000397,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:crustaceans,cereals_containing_gluten,fish,molluscs",
          "compliant": true,
          "score": 0.8571,
          "detail": {
            "predicted": [
              "cereals_containing_gluten",
              "fish",
              "molluscs",
              "set:crustaceans"
            ],
            "expected": [
              "cereals_containing_gluten",
              "fish",
              "molluscs"
            ],
            "precision": 0.75,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 916,
            "output_tokens": 19,
            "latency_ms": 696.0,
            "usd": 0.000397,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6666,
      "score_summary": {
        "mean": 0.6666,
        "stdev": 0.1649,
        "sem": 0.0952,
        "ci95_lo": 0.48,
        "ci95_hi": 0.8533,
        "n": 3
      },
      "score_if_valid": 0.6666,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 916.0,
        "output_tokens_mean": 19.0,
        "latency_ms_mean": 811.4,
        "usd_sum": 0.001191
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-011",
      "category": "dietary",
      "capability": "low_fodmap",
      "difficulty": "hard",
      "cuisine_region": "global",
      "scorer": "set_f1",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "bdd7970997c11172",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nReturn the low-FODMAP violations in this dish as a comma-separated list of ingredient names: garlic-infused olive oil, white onion, lactose-free milk, wheat pasta, lean chicken, fresh chives, honey, aged parmesan.\n\nINPUT:\n{\n  \"ingredients\": [\n    \"garlic-infused olive oil\",\n    \"white onion\",\n    \"lactose-free milk\",\n    \"wheat pasta\",\n    \"lean chicken\",\n    \"fresh chives\",\n    \"honey\",\n    \"aged parmesan\"\n  ]\n}\n\nANSWER FORMAT: set:comma-separated ingredient names",
      "expected": {
        "values": [
          "white onion",
          "wheat pasta",
          "honey"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "set:white onion,wheat pasta,honey",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "honey",
              "set:white onion",
              "wheat pasta"
            ],
            "expected": [
              "honey",
              "wheat pasta",
              "white onion"
            ],
            "precision": 0.6667,
            "recall": 0.6667
          },
          "metrics": {
            "input_tokens": 933,
            "output_tokens": 8,
            "latency_ms": 550.8,
            "usd": 0.000386,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:white onion,wheat pasta,honey",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "honey",
              "set:white onion",
              "wheat pasta"
            ],
            "expected": [
              "honey",
              "wheat pasta",
              "white onion"
            ],
            "precision": 0.6667,
            "recall": 0.6667
          },
          "metrics": {
            "input_tokens": 933,
            "output_tokens": 8,
            "latency_ms": 410.6,
            "usd": 0.000386,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:white onion,wheat pasta,honey",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "honey",
              "set:white onion",
              "wheat pasta"
            ],
            "expected": [
              "honey",
              "wheat pasta",
              "white onion"
            ],
            "precision": 0.6667,
            "recall": 0.6667
          },
          "metrics": {
            "input_tokens": 933,
            "output_tokens": 8,
            "latency_ms": 535.0,
            "usd": 0.000386,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 933.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 498.8,
        "usd_sum": 0.001158
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:diet-012",
      "category": "dietary",
      "capability": "pescatarian_vs_vegetarian",
      "difficulty": "easy",
      "cuisine_region": "japanese",
      "scorer": "set_f1",
      "context_needs": [
        "rules.dietary",
        "rules.allergens_eu",
        "vocab.dietary_labels"
      ],
      "prompt_template_hash": "42c897d741064981",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n### Dietary labels\n\nvegan, vegetarian, pescatarian, halal, kosher, gluten_free, dairy_free, nut_free, shellfish_free, egg_free, soy_free, low_fodmap\n\n---\n\nA dish contains miso, dashi (made with katsuobushi), tofu, and wakame. Mark which labels it qualifies for out of: vegan, vegetarian, pescatarian. Return a comma-separated list.\n\nINPUT:\n{\n  \"dish\": \"classic awase-dashi miso soup with tofu and wakame\"\n}\n\nANSWER FORMAT: set:comma-separated labels",
      "expected": {
        "values": [
          "pescatarian"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "set:pescatarian",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "set:pescatarian"
            ],
            "expected": [
              "pescatarian"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 893,
            "output_tokens": 5,
            "latency_ms": 497.4,
            "usd": 0.000365,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:pescatarian",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "set:pescatarian"
            ],
            "expected": [
              "pescatarian"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 893,
            "output_tokens": 5,
            "latency_ms": 549.8,
            "usd": 0.000365,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:vegetarian,pescatarian",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "pescatarian",
              "set:vegetarian"
            ],
            "expected": [
              "pescatarian"
            ],
            "precision": 0.5,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 893,
            "output_tokens": 8,
            "latency_ms": 787.5,
            "usd": 0.00037,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.2222,
      "score_summary": {
        "mean": 0.2222,
        "stdev": 0.3849,
        "sem": 0.2222,
        "ci95_lo": -0.2133,
        "ci95_hi": 0.6578,
        "n": 3
      },
      "score_if_valid": 0.2222,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 893.0,
        "output_tokens_mean": 6.0,
        "latency_ms_mean": 611.567,
        "usd_sum": 0.0011
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-001",
      "category": "menu_engineering",
      "capability": "matrix_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "138c09ed611a2d43",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA dish has above-average popularity and above-average contribution margin on the menu. Using the OGS menu-engineering matrix, which quadrant is it? Respond with one word.\n\nINPUT:\n{\n  \"popularity\": \"above\",\n  \"margin\": \"above\"\n}\n\nANSWER FORMAT: exact:star|plowhorse|puzzle|dog",
      "expected": {
        "value": "star"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "star",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "star",
            "expected": "star",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 499,
            "output_tokens": 1,
            "latency_ms": 555.9,
            "usd": 0.000201,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "star",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "star",
            "expected": "star",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 499,
            "output_tokens": 1,
            "latency_ms": 616.4,
            "usd": 0.000201,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "star",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "star",
            "expected": "star",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 499,
            "output_tokens": 1,
            "latency_ms": 486.1,
            "usd": 0.000201,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 499.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 552.8,
        "usd_sum": 0.000603
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-002",
      "category": "menu_engineering",
      "capability": "matrix_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "b5df321fd829de76",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA dish sells strongly but the contribution margin is below the menu average. Classify it.\n\nINPUT:\n{\n  \"popularity\": \"above\",\n  \"margin\": \"below\"\n}\n\nANSWER FORMAT: exact:star|plowhorse|puzzle|dog",
      "expected": {
        "value": "plowhorse"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "plowhorse",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "plowhorse",
            "expected": "plowhorse",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 483,
            "output_tokens": 3,
            "latency_ms": 591.4,
            "usd": 0.000198,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "plowhorse",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "plowhorse",
            "expected": "plowhorse",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 483,
            "output_tokens": 3,
            "latency_ms": 730.6,
            "usd": 0.000198,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "plowhorse",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "plowhorse",
            "expected": "plowhorse",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 483,
            "output_tokens": 3,
            "latency_ms": 531.8,
            "usd": 0.000198,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 483.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 617.933,
        "usd_sum": 0.000594
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-003",
      "category": "menu_engineering",
      "capability": "matrix_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "9795b72bdda437f1",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA dish has strong contribution margin but is rarely ordered. Classify it.\n\nINPUT:\n{\n  \"popularity\": \"below\",\n  \"margin\": \"above\"\n}\n\nANSWER FORMAT: exact:star|plowhorse|puzzle|dog",
      "expected": {
        "value": "puzzle"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "puzzle",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "puzzle",
            "expected": "puzzle",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 480,
            "output_tokens": 2,
            "latency_ms": 468.8,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "puzzle",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "puzzle",
            "expected": "puzzle",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 480,
            "output_tokens": 2,
            "latency_ms": 610.0,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "puzzle",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "puzzle",
            "expected": "puzzle",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 480,
            "output_tokens": 2,
            "latency_ms": 388.0,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 480.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 488.933,
        "usd_sum": 0.000585
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-004",
      "category": "menu_engineering",
      "capability": "matrix_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "fc0da6dcbbc842fb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA dish is rarely ordered and has a contribution margin below the menu average. Classify it.\n\nINPUT:\n{\n  \"popularity\": \"below\",\n  \"margin\": \"below\"\n}\n\nANSWER FORMAT: exact:star|plowhorse|puzzle|dog",
      "expected": {
        "value": "dog"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "dog",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "dog",
            "expected": "dog",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 484,
            "output_tokens": 1,
            "latency_ms": 451.8,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "dog",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "dog",
            "expected": "dog",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 484,
            "output_tokens": 1,
            "latency_ms": 558.6,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "dog",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "dog",
            "expected": "dog",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 484,
            "output_tokens": 1,
            "latency_ms": 622.9,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 484.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 544.433,
        "usd_sum": 0.000585
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-005",
      "category": "menu_engineering",
      "capability": "food_cost",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "f33710f88f89121e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA dish has a food cost of £4.20 and a menu price of £14.00. Return the food-cost percentage as a single integer.\n\nINPUT:\n{\n  \"food_cost\": 4.2,\n  \"menu_price\": 14.0\n}\n\nANSWER FORMAT: numeric:integer percent",
      "expected": {
        "value": 30,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "30",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 30.0,
            "expected": 30.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 493,
            "output_tokens": 1,
            "latency_ms": 480.3,
            "usd": 0.000199,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "30 percent",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 30.0,
            "expected": 30.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 493,
            "output_tokens": 2,
            "latency_ms": 6388.1,
            "usd": 0.0002,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "30",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 30.0,
            "expected": 30.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 493,
            "output_tokens": 1,
            "latency_ms": 395.8,
            "usd": 0.000199,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 493.0,
        "output_tokens_mean": 1.333,
        "latency_ms_mean": 2421.4,
        "usd_sum": 0.000598
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-006",
      "category": "menu_engineering",
      "capability": "contribution_margin",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "7bb019ca9ca4b1be",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA dish has a food cost of $6.50 and a menu price of $22.00. 
Return the contribution margin in dollars as a single number with two decimals.\n\nINPUT:\n{\n  \"food_cost\": 6.5,\n  \"menu_price\": 22.0\n}\n\nANSWER FORMAT: numeric:decimal USD",
      "expected": {
        "value": 15.5,
        "tolerance": 0.05
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "15.50",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 15.5,
            "expected": 15.5,
            "tolerance": 0.05
          },
          "metrics": {
            "input_tokens": 497,
            "output_tokens": 3,
            "latency_ms": 652.0,
            "usd": 0.000204,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "15.50",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 15.5,
            "expected": 15.5,
            "tolerance": 0.05
          },
          "metrics": {
            "input_tokens": 497,
            "output_tokens": 3,
            "latency_ms": 685.6,
            "usd": 0.000204,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "15.50",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 15.5,
            "expected": 15.5,
            "tolerance": 0.05
          },
          "metrics": {
            "input_tokens": 497,
            "output_tokens": 3,
            "latency_ms": 391.1,
            "usd": 0.000204,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 497.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 576.233,
        "usd_sum": 0.000612
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-007",
      "category": "menu_engineering",
      "capability": "menu_rebalance",
      "difficulty": "hard",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "68b7fb2ec1e432f4",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nFive main courses have this (popularity %, margin $) profile: A (35, 18), B (30, 9), C (5, 6), D (20, 16), E (10, 7). 
Average popularity is 20%; average margin is $11.20. Which single dish is the dog? Respond with the letter only.\n\nINPUT:\n{\n  \"dishes\": [\n    {\n      \"id\": \"A\",\n      \"pop\": 35,\n      \"margin\": 18\n    },\n    {\n      \"id\": \"B\",\n      \"pop\": 30,\n      \"margin\": 9\n    },\n    {\n      \"id\": \"C\",\n      \"pop\": 5,\n      \"margin\": 6\n    },\n    {\n      \"id\": \"D\",\n      \"pop\": 20,\n      \"margin\": 16\n    },\n    {\n      \"id\": \"E\",\n      \"pop\": 10,\n      \"margin\": 7\n    }\n  ],\n  \"avg_pop\": 20,\n  \"avg_margin\": 11.2\n}\n\nANSWER FORMAT: exact:A|B|C|D|E",
      "expected": {
        "value": "C"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:C",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:c",
            "expected": "c",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 675,
            "output_tokens": 2,
            "latency_ms": 581.3,
            "usd": 0.000273,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:C",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:c",
            "expected": "c",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 675,
            "output_tokens": 2,
            "latency_ms": 587.9,
            "usd": 0.000273,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "C",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "c",
            "expected": "c",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 675,
            "output_tokens": 1,
            "latency_ms": 486.8,
            "usd": 0.000272,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 675.0,
        "output_tokens_mean": 1.667,
        "latency_ms_mean": 552.0,
        "usd_sum": 0.000818
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-008",
      "category": "menu_engineering",
      "capability": "target_price",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles"
      ],
      "prompt_template_hash": "dc5bf7ed3a0583c2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nA chef wants a 28% food cost on a dish whose ingredient cost is £3.92. 
Return the target menu price in pounds, two decimals.\n\nINPUT:\n{\n  \"food_cost\": 3.92,\n  \"target_food_cost_pct\": 28\n}\n\nANSWER FORMAT: numeric:decimal GBP",
      "expected": {
        "value": 14.0,
        "tolerance": 0.05
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "14.00 GBP",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 14.0,
            "expected": 14.0,
            "tolerance": 0.05
          },
          "metrics": {
            "input_tokens": 494,
            "output_tokens": 4,
            "latency_ms": 660.1,
            "usd": 0.000204,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "14.00 GBP",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 14.0,
            "expected": 14.0,
            "tolerance": 0.05
          },
          "metrics": {
            "input_tokens": 494,
            "output_tokens": 4,
            "latency_ms": 563.6,
            "usd": 0.000204,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "14.00 GBP",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 14.0,
            "expected": 14.0,
            "tolerance": 0.05
          },
          "metrics": {
            "input_tokens": 494,
            "output_tokens": 4,
            "latency_ms": 609.7,
            "usd": 0.000204,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 494.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 611.133,
        "usd_sum": 0.000612
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:menu-009",
      "category": "menu_engineering",
      "capability": "menu_critique",
      "difficulty": "hard",
      "cuisine_region": "global",
      "scorer": "rubric_judge",
      "context_needs": [
        "rules.menu_engineering",
        "vocab.courses",
        "vocab.component_roles",
        "vocab.match_types",
        "rules.dietary",
        "rules.allergens_eu"
      ],
      "prompt_template_hash": "8883cacb0aca5024",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Menu engineering reference\n\nClassical Boston Matrix quadrants (popularity vs. contribution margin, each split at the menu average):\n- **Star**: high popularity, high margin → feature prominently, do   not change.\n- **Plowhorse**: high popularity, low margin → redesign the dish to   cut food cost or reprice gently.\n- **Puzzle**: low popularity, high margin → reposition on the menu,   rename, or add a compelling description.\n- **Dog**: low popularity, low margin → remove or replace.\n\nContribution margin (CM) = menu price − food cost. Food-cost percentage = food cost / menu price. 
Industry target for casual dining: 28–35% food cost.\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n### OGS Dietary labels and rules\n\nCanonical dietary label IDs (lowercase snake_case):\n`vegan`, `vegetarian`, `pescatarian`, `halal`, `kosher`, `gluten_free`, `dairy_free`, `nut_free`, `shellfish_free`, `egg_free`, `soy_free`, `low_fodmap`\n\nHigh-level rules (non-exhaustive):\n- `vegan`: no animal-derived ingredients, including honey, dairy, eggs, gelatine, carmine, isinglass, shellac, animal rennet, or fish sauce.\n- `vegetarian`: no meat, poultry, fish, shellfish, or animal-derived rennet/gelatine. Dairy and eggs permitted.\n- `pescatarian`: no meat or poultry. Fish and shellfish permitted.\n- `halal`: no pork or pork derivatives, no alcohol, meat must be zabihah-slaughtered. Gelatine must be from halal source.\n- `kosher`: no pork or shellfish, no mixing of meat and dairy, meat must be shechita-slaughtered. Separate utensils for meat/dairy.\n- `gluten_free`: no wheat, barley, rye, or derivatives (including malt, soy sauce with wheat, seitan).\n- `dairy_free`: no milk, butter, cream, cheese, whey, casein, ghee, yogurt, or lactose.\n- `nut_free`: no tree nuts (almond, cashew, walnut, hazelnut, pecan, pistachio, macadamia, brazil, pine nut). 
Peanuts are legumes but are usually treated as a separate allergen.\n- `shellfish_free`: no crustaceans (shrimp, crab, lobster, crayfish) or molluscs (mussel, clam, oyster, scallop, squid, octopus).\n- `low_fodmap`: restrict high-FODMAP ingredients (onion, garlic, wheat, beans, lactose, honey, most stone fruit).\n\nA recipe is compatible with a label if and only if **all** its ingredients comply with the label's rules.\n\n### EU 14 declarable allergens (Regulation 1169/2011 Annex II)\n\n- `cereals_containing_gluten`\n- `crustaceans`\n- `eggs`\n- `fish`\n- `peanuts`\n- `soybeans`\n- `milk`\n- `tree_nuts`\n- `celery`\n- `mustard`\n- `sesame`\n- `sulphur_dioxide`\n- `lupin`\n- `molluscs`\n\nUse exactly these IDs when declaring allergens in OGS documents.\n\n---\n\nCritique the following five-course autumn tasting menu. Identify at least two concrete weaknesses (flow, seasonality, allergen risk, cost-to-price ratio, or structural monotony), suggest one specific, constructive change, and justify each point in 3 short paragraphs. Avoid generic praise.\n\nINPUT:\n{\n  \"menu\": [\n    {\n      \"course\": \"amuse\",\n      \"dish\": \"pumpkin velouté, sage oil\"\n    },\n    {\n      \"course\": \"appetizer\",\n      \"dish\": \"butternut squash risotto, brown butter, amaretti crumble\"\n    },\n    {\n      \"course\": \"fish\",\n      \"dish\": \"pan-seared scallops, pumpkin purée, crispy pancetta\"\n    },\n    {\n      \"course\": \"main\",\n      \"dish\": \"roast pork loin, sweet potato purée, cider jus\"\n    },\n    {\n      \"course\": \"dessert\",\n      \"dish\": \"pumpkin cheesecake, spiced pecan brittle\"\n    }\n  ],\n  \"target_food_cost_pct\": 30,\n  \"dietary_constraints\": \"must offer a viable vegetarian and a viable nut-free path\"\n}\n\nANSWER FORMAT: free text, 3 short paragraphs",
      "expected": {
        "rubric": [
          {
            "id": "diagnosis",
            "weight": 0.4,
            "description": "Identifies concrete, defensible weaknesses (flavor monotony from repeated pumpkin/squash, nut allergen in amaretti and pecans, heavy starches in consecutive courses).",
            "anchors": {
              "0.0": "No specific weakness identified.",
              "0.5": "Identifies one concrete weakness but misses another major one.",
              "1.0": "Identifies at least two of: pumpkin/squash repetition across 4 of 5 courses; nut allergen across amaretti and pecans; consecutive purée/starch heaviness."
            }
          },
          {
            "id": "remediation",
            "weight": 0.3,
            "description": "Offers a specific, actionable, seasonally-plausible change that addresses the identified weakness.",
            "anchors": {
              "0.0": "No change proposed or change is vague.",
              "0.5": "Change is specific but does not address the main weakness.",
              "1.0": "Concrete substitute with named ingredient/technique (e.g. swap scallop course to cured mackerel with pickled apple)."
            }
          },
          {
            "id": "justification",
            "weight": 0.3,
            "description": "Justification is grounded in OGS concepts (match types, structural balance, service flow) rather than marketing language.",
            "anchors": {
              "0.0": "Pure adjective salad.",
              "0.5": "Some reasoning, but appeals to authority not mechanism.",
              "1.0": "Explicitly cites a mechanism: intensity alignment, contrast, allergen cross-contact, food-cost math."
            }
          }
        ],
        "reference_answer": "Key weaknesses: (1) pumpkin/squash appears in 4 of 5 courses, collapsing the menu's flavour arc into a single beta-carotene/sweet-starch note; (2) the amaretti crumble (bitter almond) and the pecan brittle together block every nut-allergic guest from the tasting; (3) velouté, risotto, two purées and a cheesecake stack four viscous/starchy textures in a row with no acid or raw element. A concrete fix is to replace the scallop course with cured mackerel over pickled Granny Smith apple and wasabi-lime gel: it re-introduces sea/fruit/acid, breaks the texture loop, removes one pork-cross-contact dish, and simultaneously opens a pescatarian-friendly option. Justification: on OGS terms this swap adds a `contrast` match (acid/fat), raises `acidity_perception` mid-meal to refresh the palate before the roast, and removes one of the two tree-nut exposure points."
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "A key weakness of the menu is structural monotony in the use of pumpkin across three courses (amuse, fish, dessert). While autumnal, this repetition risks palate fatigue and limits textural and flavor contrast, reducing overall excitement. The consistent presence of pumpkin in varied forms lacks a bridge or refresh element to diversify the experience.\n\nAnother concern is allergen risk related to nuts and shellfish. The dessert contains pecans, which excludes nut-free diners, and the fish course features scallops and pancetta, which pose shellfish and pork concerns. This complicates delivering truly viable vegetarian and nut-free adaptations that maintain the menu’s thematic cohesion, especially given the rich use of pumpkin and butter-based ingredients which challenge dairy-free options.\n\nA constructive change would be to replace the pumpkin purée in the fish course with a more neutral or contrasting vegetable purée such as parsnip or celeriac, which complements roast pork and sweet potato while lessening pumpkin redundancy. This substitution would reduce structural monotony and expand allergen-friendly flexibility by allowing easier nut-free and vegetarian adaptations, potentially improving menu balance without disrupting the autumnal theme or increasing food costs significantly.",
          "compliant": true,
          "score": 0.551,
          "detail": {
            "per_criterion": [
              {
                "id": "diagnosis",
                "weight": 0.4,
                "score": 0.5,
                "note": "It identifies pumpkin repetition and allergen concerns, but misses the broader squash repetition and the consecutive starch/purée heaviness while adding less relevant shellfish/pork concerns."
              },
              {
                "id": "remediation",
                "weight": 0.3,
                "score": 0.5,
                "note": "The proposed swap from pumpkin purée to parsnip or celeriac is specific and seasonal, but it only partially addresses the menu's main structural and dietary-path problems."
              },
              {
                "id": "justification",
                "weight": 0.3,
                "score": 0.67,
                "note": "The reasoning references palate fatigue, contrast, and food-cost impact, but it stays fairly general and does not clearly explain mechanisms like service flow, cross-contact, or structural balance in depth."
              }
            ],
            "summary": "The answer follows the required three-paragraph free-text format and provides concrete critique rather than generic praise. It correctly spots some monotony and allergen issues, but its diagnosis is incomplete and the fix is too narrow to fully solve the menu's repetition and viable nut-free/vegetarian path problems.",
            "judge_model": "gpt-5.4",
            "judge_prompt_hash": "756db6a487714bca",
            "judge_validated": false
          },
          "metrics": {
            "input_tokens": 1252,
            "output_tokens": 231,
            "latency_ms": 2695.1,
            "usd": 0.00087,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "One key weakness in this menu is the heavy repetition of pumpkin and squash across four of the five courses. While seasonally appropriate for autumn, this creates a structural monotony that lessens the impact of each dish and risks guest fatigue. The pumpkin velouté, pumpkin purée with scallops, butternut squash risotto, and pumpkin cheesecake all revolve around similar flavor profiles and textures, limiting contrast and excitement throughout the progression.\n\nAnother concern is allergen risk and dietary accommodation. The menu includes amaretti crumble (contains nuts), spiced pecan brittle (nuts), and pancetta (not vegetarian or nut-free), which makes achieving a truly nut-free and vegetarian path difficult without significant modifications. There is no clear protein substitute or nut-free dessert alternative proposed, which could alienate guests with these common dietary restrictions.\n\nA constructive change would be to replace the amaretti crumble in the risotto with a toasted seed crumble (e.g., pumpkin or sunflower seeds) that preserves texture but removes nuts, allowing nut-free guests to enjoy the dish safely. For better structural balance and to diversify flavors, consider swapping the pumpkin purée accompaniment for the scallops with a sautéed or pickled autumn vegetable like Brussels sprouts or a parsnip purée, introducing a different vegetal note that complements the seafood and reduces the pumpkin repetition. This would enhance the menu’s flow and broaden its appeal.",
          "compliant": true,
          "score": 0.88,
          "detail": {
            "per_criterion": [
              {
                "id": "diagnosis",
                "weight": 0.4,
                "score": 1.0,
                "note": "It clearly identifies pumpkin/squash repetition and nut-allergen exposure, and also notes structural monotony from similar textures/flavors."
              },
              {
                "id": "remediation",
                "weight": 0.3,
                "score": 0.8,
                "note": "It proposes specific substitutions like seed crumble and Brussels sprouts or parsnip purée, though the fixes are split and less fully integrated into one menu-level change."
              },
              {
                "id": "justification",
                "weight": 0.3,
                "score": 0.8,
                "note": "The reasoning cites guest fatigue, structural monotony, dietary-path blockage, and menu flow, though it is lighter on explicit service-flow or food-cost mechanics."
              }
            ],
            "summary": "The answer follows the requested three-paragraph free-text format and avoids generic praise. It diagnoses multiple concrete weaknesses and offers actionable seasonal fixes with sound operational reasoning, though the justification could be more explicitly framed in technical OGS terms.",
            "judge_model": "gpt-5.4",
            "judge_prompt_hash": "756db6a487714bca",
            "judge_validated": false
          },
          "metrics": {
            "input_tokens": 1252,
            "output_tokens": 277,
            "latency_ms": 3063.1,
            "usd": 0.000944,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "One weakness of this menu is its heavy reliance on pumpkin and closely related autumn squash flavors in multiple courses (amuse, appetizer, fish, dessert). While seasonally appropriate, this repetition risks structural monotony, causing diners to experience less excitement as similar flavor profiles dominate. Introducing more varied seasonal vegetables, like roasted beets or celeriac, could bring different textures and colors, enhancing guest engagement.\n\nAnother concern is allergen risk, particularly due to the amaretti crumble and spiced pecan brittle in the appetizer and dessert, respectively. Both contain tree nuts, which precludes a truly nut-free path unless these components are replaced or omitted. This clashes with the requirement for a viable nut-free option. Moreover, the use of brown butter and pancetta excludes vegans and potentially those with dairy or pork restrictions, limiting inclusivity.\n\nA constructive change would be to swap the amaretti crumble in the risotto for toasted bread crumbs or a nut-free seed crumble, maintaining texture without allergen risk. Similarly, replacing the spiced pecan brittle with a spiced oat crumble or caramelized seeds provides the same crunchy contrast at dessert. These swaps preserve the autumn theme and mouthfeel while enabling compliance with nut-free and vegetarian paths, broadening the menu’s accessibility and market appeal.",
          "compliant": true,
          "score": 0.82,
          "detail": {
            "per_criterion": [
              {
                "id": "diagnosis",
                "weight": 0.4,
                "score": 1.0,
                "note": "It clearly identifies two major weaknesses from the rubric: repeated pumpkin/squash across the menu and tree-nut allergen exposure in both appetizer and dessert."
              },
              {
                "id": "remediation",
                "weight": 0.3,
                "score": 0.7,
                "note": "It proposes specific substitutions for the nut components, but does not offer a broader structural fix for the menu's monotony or heaviness."
              },
              {
                "id": "justification",
                "weight": 0.3,
                "score": 0.7,
                "note": "The reasoning references structural monotony and allergen exclusion mechanisms, though it is lighter on service-flow, texture sequencing, and cost or intensity mechanics."
              }
            ],
            "summary": "The answer follows the requested free-text format and provides three short paragraphs with concrete critique rather than generic praise. It diagnoses key issues well and gives actionable allergen-focused fixes, but it only partially addresses the menu's broader flow and texture heaviness.",
            "judge_model": "gpt-5.4",
            "judge_prompt_hash": "756db6a487714bca",
            "judge_validated": false
          },
          "metrics": {
            "input_tokens": 1252,
            "output_tokens": 256,
            "latency_ms": 3413.4,
            "usd": 0.00091,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.7503,
      "score_summary": {
        "mean": 0.7503,
        "stdev": 0.1752,
        "sem": 0.1012,
        "ci95_lo": 0.5521,
        "ci95_hi": 0.9486,
        "n": 3
      },
      "score_if_valid": 0.7503,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1252.0,
        "output_tokens_mean": 254.667,
        "latency_ms_mean": 3057.2,
        "usd_sum": 0.002724
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-ar-001",
      "category": "vocabulary",
      "capability": "ingredient_classification",
      "difficulty": "easy",
      "cuisine_region": "middle_eastern",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.ingredient_categories"
      ],
      "prompt_template_hash": "94e19d81d80368a1",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n---\n\nباستخدام مفردات OGS لتصنيفات المكونات، صنّف الزعتر بكلمة واحدة.\n\nINPUT:\n{\n  \"item\": \"زعتر\"\n}\n\nANSWER FORMAT: one_of:protein,vegetable,fruit,grain,dairy,fat,herb,spice,fungus,legume,nut,seed,sweetener,acid,salt,other",
      "expected": {
        "value": "herb",
        "aliases": [
          "spice"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "herb",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herb",
            "expected": "herb",
            "aliases": [
              "spice"
            ]
          },
          "metrics": {
            "input_tokens": 343,
            "output_tokens": 2,
            "latency_ms": 599.3,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "herb",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herb",
            "expected": "herb",
            "aliases": [
              "spice"
            ]
          },
          "metrics": {
            "input_tokens": 343,
            "output_tokens": 2,
            "latency_ms": 487.4,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "herb",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herb",
            "expected": "herb",
            "aliases": [
              "spice"
            ]
          },
          "metrics": {
            "input_tokens": 343,
            "output_tokens": 2,
            "latency_ms": 748.7,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 343.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 611.8,
        "usd_sum": 0.00042
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-ar-002",
      "category": "regional",
      "capability": "region_identification",
      "difficulty": "easy",
      "cuisine_region": "middle_eastern",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "6b39ec376be689ad",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nالمنسف هو الطبق الوطني لأي بلد عربي؟ أجب بكلمة واحدة.\n\nINPUT:\n{\n  \"dish\": \"المنسف\"\n}\n\nANSWER FORMAT: exact:country",
      "expected": {
        "value": "jordan",
        "aliases": [
          "الأردن",
          "jordan (الأردن)"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:jordan",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:jordan",
            "expected": "jordan",
            "aliases": [
              "الأردن",
              "jordan (الأردن)"
            ]
          },
          "metrics": {
            "input_tokens": 260,
            "output_tokens": 3,
            "latency_ms": 562.6,
            "usd": 0.000109,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:jordan",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:jordan",
            "expected": "jordan",
            "aliases": [
              "الأردن",
              "jordan (الأردن)"
            ]
          },
          "metrics": {
            "input_tokens": 260,
            "output_tokens": 3,
            "latency_ms": 538.6,
            "usd": 0.000109,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:jordan",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:jordan",
            "expected": "jordan",
            "aliases": [
              "الأردن",
              "jordan (الأردن)"
            ]
          },
          "metrics": {
            "input_tokens": 260,
            "output_tokens": 3,
            "latency_ms": 602.0,
            "usd": 0.000109,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 260.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 567.733,
        "usd_sum": 0.000327
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-es-001",
      "category": "regional",
      "capability": "region_identification",
      "difficulty": "easy",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "5fe5c7f1f85e3675",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nEl pulpo a feira es un plato tradicional de qué región de España? Responde con una palabra.\n\nINPUT:\n{\n  \"dish\": \"pulpo a feira\"\n}\n\nANSWER FORMAT: exact:region name",
      "expected": {
        "value": "galicia",
        "aliases": [
          "galícia",
          "galiza"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:Galicia",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:galicia",
            "expected": "galicia",
            "aliases": [
              "galícia",
              "galiza"
            ]
          },
          "metrics": {
            "input_tokens": 264,
            "output_tokens": 4,
            "latency_ms": 525.7,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:galicia",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:galicia",
            "expected": "galicia",
            "aliases": [
              "galícia",
              "galiza"
            ]
          },
          "metrics": {
            "input_tokens": 264,
            "output_tokens": 4,
            "latency_ms": 652.4,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:galicia",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:galicia",
            "expected": "galicia",
            "aliases": [
              "galícia",
              "galiza"
            ]
          },
          "metrics": {
            "input_tokens": 264,
            "output_tokens": 4,
            "latency_ms": 561.5,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 264.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 579.867,
        "usd_sum": 0.000336
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-es-002",
      "category": "vocabulary",
      "capability": "cooking_method_classification",
      "difficulty": "easy",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "9f03b90711bec40f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nUsando el vocabulario OGS de métodos de cocción, ¿cuál describe mejor la preparación clásica del cochinillo segoviano? Responde con una palabra.\n\nINPUT:\n{\n  \"item\": \"cochinillo segoviano\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "roasted"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 397,
            "output_tokens": 2,
            "latency_ms": 477.4,
            "usd": 0.000162,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 397,
            "output_tokens": 2,
            "latency_ms": 472.9,
            "usd": 0.000162,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 397,
            "output_tokens": 2,
            "latency_ms": 734.0,
            "usd": 0.000162,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 397.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 561.433,
        "usd_sum": 0.000486
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-fr-001",
      "category": "regional",
      "capability": "region_identification",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "8d252ae3fb170e15",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nLa bouillabaisse est une préparation traditionnelle de quelle ville française ? Répondez par un mot.\n\nINPUT:\n{\n  \"dish\": \"bouillabaisse\"\n}\n\nANSWER FORMAT: exact:city name",
      "expected": {
        "value": "marseille"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:Marseille",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:marseille",
            "expected": "marseille",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 264,
            "output_tokens": 4,
            "latency_ms": 582.7,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:Marseille",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:marseille",
            "expected": "marseille",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 264,
            "output_tokens": 4,
            "latency_ms": 585.3,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:marseille",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:marseille",
            "expected": "marseille",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 264,
            "output_tokens": 4,
            "latency_ms": 476.8,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 264.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 548.267,
        "usd_sum": 0.000336
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-fr-002",
      "category": "sensory",
      "capability": "basic_tastes_profile",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "96a07aeab44536ab",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nPour une sauce beurre blanc classique, donnez le profil sur l'échelle OGS des cinq saveurs [sweet, sour, salty, bitter, umami] sous la forme d'un tableau JSON de cinq nombres entre 0 et 10.\n\nINPUT:\n{\n  \"dish\": \"sauce beurre blanc classique\"\n}\n\nANSWER FORMAT: json:array of 5 numbers",
      "expected": {
        "values": [
          1,
          5,
          4,
          1,
          3
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[2, 6, 3, 0, 2]",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 2,
                "expected": 1,
                "score": 0.3333
              },
              {
                "index": 1,
                "predicted": 6,
                "expected": 5,
                "score": 0.3333
              },
              {
                "index": 2,
                "predicted": 3,
                "expected": 4,
                "score": 0.3333
              },
              {
                "index": 3,
                "predicted": 0,
                "expected": 1,
                "score": 0.3333
              },
              {
                "index": 4,
                "predicted": 2,
                "expected": 3,
                "score": 0.3333
              }
            ],
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 373,
            "output_tokens": 15,
            "latency_ms": 683.1,
            "usd": 0.000173,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[1, 6, 4, 0.5, 3]",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 1,
                "expected": 1,
                "score": 1.0
              },
              {
                "index": 1,
                "predicted": 6,
                "expected": 5,
                "score": 0.3333
              },
              {
                "index": 2,
                "predicted": 4,
                "expected": 4,
                "score": 1.0
              },
              {
                "index": 3,
                "predicted": 0.5,
                "expected": 1,
                "score": 0.6667
              },
              {
                "index": 4,
                "predicted": 3,
                "expected": 3,
                "score": 1.0
              }
            ],
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 373,
            "output_tokens": 17,
            "latency_ms": 684.1,
            "usd": 0.000176,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[1.5, 7, 3, 0, 2]",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 1.5,
                "expected": 1,
                "score": 0.6667
              },
              {
                "index": 1,
                "predicted": 7,
                "expected": 5,
                "score": 0.0
              },
              {
                "index": 2,
                "predicted": 3,
                "expected": 4,
                "score": 0.3333
              },
              {
                "index": 3,
                "predicted": 0,
                "expected": 1,
                "score": 0.3333
              },
              {
                "index": 4,
                "predicted": 2,
                "expected": 3,
                "score": 0.3333
              }
            ],
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 373,
            "output_tokens": 17,
            "latency_ms": 687.4,
            "usd": 0.000176,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.4889,
      "score_summary": {
        "mean": 0.4889,
        "stdev": 0.2694,
        "sem": 0.1556,
        "ci95_lo": 0.184,
        "ci95_hi": 0.7938,
        "n": 3
      },
      "score_if_valid": 0.4889,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 373.0,
        "output_tokens_mean": 16.333,
        "latency_ms_mean": 684.867,
        "usd_sum": 0.000525
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-it-001",
      "category": "regional",
      "capability": "region_identification",
      "difficulty": "easy",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "c3aae88ebfcbe08b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nI pici sono una pasta fresca tipica di quale regione italiana? Rispondi con una parola.\n\nINPUT:\n{\n  \"dish\": \"pici cacio e pepe\"\n}\n\nANSWER FORMAT: exact:regione",
      "expected": {
        "value": "toscana"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "\"Toscana\"",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "toscana",
            "expected": "toscana",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 266,
            "output_tokens": 4,
            "latency_ms": 511.6,
            "usd": 0.000113,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:toscana",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:toscana",
            "expected": "toscana",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 266,
            "output_tokens": 4,
            "latency_ms": 1007.8,
            "usd": 0.000113,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:toscana",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:toscana",
            "expected": "toscana",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 266,
            "output_tokens": 4,
            "latency_ms": 584.4,
            "usd": 0.000113,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 266.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 701.267,
        "usd_sum": 0.000339
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-it-002",
      "category": "pairing",
      "capability": "beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.beverage_types",
        "vocab.match_types"
      ],
      "prompt_template_hash": "30919f613b1fbcd6",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nScegli l'abbinamento più classico per l'ossobuco alla milanese fra: barolo, riesling, prosecco, lambrusco. Rispondi con una parola.\n\nINPUT:\n{\n  \"dish\": \"ossobuco alla milanese\"\n}\n\nANSWER FORMAT: exact:one of the given options",
      "expected": {
        "value": "barolo"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "barolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "barolo",
            "expected": "barolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 2,
            "latency_ms": 613.1,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "barolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "barolo",
            "expected": "barolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 2,
            "latency_ms": 503.0,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "barolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "barolo",
            "expected": "barolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 2,
            "latency_ms": 1344.9,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 329.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 820.333,
        "usd_sum": 0.000405
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-ja-001",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "easy",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "ff1f43723314705c",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nOGSの香りファミリー一覧から、柚子（ゆず）の主たる香りファミリーを一語で答えてください。\n\nINPUT:\n{\n  \"item\": \"柚子\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "fruit"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "fruit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fruit",
            "expected": "fruit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 333,
            "output_tokens": 1,
            "latency_ms": 1090.1,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "fruit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fruit",
            "expected": "fruit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 333,
            "output_tokens": 1,
            "latency_ms": 952.5,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "fruit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fruit",
            "expected": "fruit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 333,
            "output_tokens": 1,
            "latency_ms": 908.7,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 333.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 983.767,
        "usd_sum": 0.000405
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:ml-ja-002",
      "category": "pairing",
      "capability": "beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.beverage_types",
        "vocab.match_types"
      ],
      "prompt_template_hash": "f68d4882814e0ecf",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\n握り寿司（赤身・白身・光物）のコース全体に最も安全に合う飲料のタイプを、次から一つ選んでください: sake, red_wine, stout_beer, bourbon.\n\nINPUT:\n{\n  \"dish\": \"握り寿司コース\"\n}\n\nANSWER FORMAT: exact:one of the given options",
      "expected": {
        "value": "sake"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "sake",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sake",
            "expected": "sake",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 339,
            "output_tokens": 2,
            "latency_ms": 1090.8,
            "usd": 0.000139,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "sake",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sake",
            "expected": "sake",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 339,
            "output_tokens": 2,
            "latency_ms": 949.8,
            "usd": 0.000139,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "sake",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sake",
            "expected": "sake",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 339,
            "output_tokens": 2,
            "latency_ms": 1189.7,
            "usd": 0.000139,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 339.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 1076.767,
        "usd_sum": 0.000417
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-001",
      "category": "nutrition",
      "capability": "energy_calculation",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "94fadc6f5c8c5968",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. 
Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nA portion contains 40 g carbohydrate, 20 g protein, 15 g fat. Return total energy in kcal as a single integer, using the EU Annex XIV macro factors.\n\nINPUT:\n{\n  \"carb_g\": 40,\n  \"protein_g\": 20,\n  \"fat_g\": 15\n}\n\nANSWER FORMAT: numeric:integer kcal",
      "expected": {
        "value": 375,
        "tolerance": 5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "numeric:435 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 435.0,
            "expected": 375.0,
            "tolerance": 5.0
          },
          "metrics": {
            "input_tokens": 816,
            "output_tokens": 4,
            "latency_ms": 1218.8,
            "usd": 0.000333,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "685 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 685.0,
            "expected": 375.0,
            "tolerance": 5.0
          },
          "metrics": {
            "input_tokens": 816,
            "output_tokens": 2,
            "latency_ms": 1299.1,
            "usd": 0.00033,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "numeric:415 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 415.0,
            "expected": 375.0,
            "tolerance": 5.0
          },
          "metrics": {
            "input_tokens": 816,
            "output_tokens": 4,
            "latency_ms": 1133.4,
            "usd": 0.000333,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 816.0,
        "output_tokens_mean": 3.333,
        "latency_ms_mean": 1217.1,
        "usd_sum": 0.000996
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-002",
      "category": "nutrition",
      "capability": "energy_calculation",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "4b141daaadd9af57",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. 
Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nA 150 mL glass of red wine contains approximately 14 g alcohol, 4 g carbohydrate, no protein, no fat. Return energy in kcal as a single integer.\n\nINPUT:\n{\n  \"carb_g\": 4,\n  \"protein_g\": 0,\n  \"fat_g\": 0,\n  \"alcohol_g\": 14\n}\n\nANSWER FORMAT: numeric:integer kcal",
      "expected": {
        "value": 114,
        "tolerance": 5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "numeric:126 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 126.0,
            "expected": 114.0,
            "tolerance": 5.0
          },
          "metrics": {
            "input_tokens": 825,
            "output_tokens": 4,
            "latency_ms": 1009.2,
            "usd": 0.000336,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "numeric:105 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 105.0,
            "expected": 114.0,
            "tolerance": 5.0
          },
          "metrics": {
            "input_tokens": 825,
            "output_tokens": 4,
            "latency_ms": 736.1,
            "usd": 0.000336,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "numeric:123 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 123.0,
            "expected": 114.0,
            "tolerance": 5.0
          },
          "metrics": {
            "input_tokens": 825,
            "output_tokens": 4,
            "latency_ms": 974.4,
            "usd": 0.000336,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 825.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 906.567,
        "usd_sum": 0.001008
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-003",
      "category": "nutrition",
      "capability": "per_100g_estimate",
      "difficulty": "medium",
      "cuisine_region": "mediterranean",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "518ee70ef096f9ec",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. 
Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nA simple olive-oil vinaigrette is 75 g olive oil + 25 g red wine vinegar (vinegar ≈ 20 kcal/100 g). Return energy per 100 g of finished dressing as a single integer kcal.\n\nINPUT:\n{\n  \"olive_oil_g\": 75,\n  \"vinegar_g\": 25\n}\n\nANSWER FORMAT: numeric:integer kcal per 100 g",
      "expected": {
        "value": 668,
        "tolerance": 15
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "669",
          "compliant": true,
          "score": 0.9333,
          "detail": {
            "predicted": 669.0,
            "expected": 668.0,
            "tolerance": 15.0
          },
          "metrics": {
            "input_tokens": 826,
            "output_tokens": 1,
            "latency_ms": 1215.3,
            "usd": 0.000332,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "numeric:666 kcal per 100 g",
          "compliant": true,
          "score": 0.8667,
          "detail": {
            "predicted": 666.0,
            "expected": 668.0,
            "tolerance": 15.0
          },
          "metrics": {
            "input_tokens": 826,
            "output_tokens": 8,
            "latency_ms": 1059.9,
            "usd": 0.000343,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "669",
          "compliant": true,
          "score": 0.9333,
          "detail": {
            "predicted": 669.0,
            "expected": 668.0,
            "tolerance": 15.0
          },
          "metrics": {
            "input_tokens": 826,
            "output_tokens": 1,
            "latency_ms": 1102.8,
            "usd": 0.000332,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.9111,
      "score_summary": {
        "mean": 0.9111,
        "stdev": 0.0385,
        "sem": 0.0222,
        "ci95_lo": 0.8676,
        "ci95_hi": 0.9546,
        "n": 3
      },
      "score_if_valid": 0.9111,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 826.0,
        "output_tokens_mean": 3.333,
        "latency_ms_mean": 1126.0,
        "usd_sum": 0.001007
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-004",
      "category": "nutrition",
      "capability": "protein_estimate",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "42947503c82b308d",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. 
Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nEstimate the protein (g) in a 180 g cooked portion of skinless chicken-breast filet assuming 30% cooking mass loss. Respond with one integer.\n\nINPUT:\n{\n  \"raw_mass_g_target_cooked\": 180,\n  \"cooking_loss\": 0.3\n}\n\nANSWER FORMAT: numeric:integer g",
      "expected": {
        "value": 59,
        "tolerance": 4
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "numeric:59 g",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 59.0,
            "expected": 59.0,
            "tolerance": 4.0
          },
          "metrics": {
            "input_tokens": 812,
            "output_tokens": 4,
            "latency_ms": 452.5,
            "usd": 0.000331,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "numeric:33 g",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 33.0,
            "expected": 59.0,
            "tolerance": 4.0
          },
          "metrics": {
            "input_tokens": 812,
            "output_tokens": 4,
            "latency_ms": 1290.6,
            "usd": 0.000331,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "numeric:33 g",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 33.0,
            "expected": 59.0,
            "tolerance": 4.0
          },
          "metrics": {
            "input_tokens": 812,
            "output_tokens": 4,
            "latency_ms": 1219.3,
            "usd": 0.000331,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 812.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 987.467,
        "usd_sum": 0.000993
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-005",
      "category": "nutrition",
      "capability": "macro_vector",
      "difficulty": "hard",
      "cuisine_region": "italian",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "375ca5d44d346632",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. 
Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nA portion contains 85 g dry pasta (75 C / 13 P / 1.5 F per 100 g), 10 g butter (0.1 C / 0.9 P / 81 F per 100 g) and 15 g parmesan (3.2 C / 35 P / 29 F per 100 g). Return [carb_g, protein_g, fat_g, kcal] for the portion as a JSON array of four numbers.\n\nINPUT:\n{\n  \"pasta_g\": 85,\n  \"butter_g\": 10,\n  \"parmesan_g\": 15\n}\n\nANSWER FORMAT: json:array [carb, protein, fat, kcal]",
      "expected": {
        "values": [
          64.3,
          16.3,
          13.7,
          446
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[66.05, 13.675, 14.63, 498]",
          "compliant": true,
          "score": 0.3079,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 66.05,
                "expected": 64.3,
                "score": 0.4167
              },
              {
                "index": 1,
                "predicted": 13.675,
                "expected": 16.3,
                "score": 0.125
              },
              {
                "index": 2,
                "predicted": 14.63,
                "expected": 13.7,
                "score": 0.69
              },
              {
                "index": 3,
                "predicted": 498,
                "expected": 446,
                "score": 0.0
              }
            ],
            "tolerance": 3.0
          },
          "metrics": {
            "input_tokens": 890,
            "output_tokens": 18,
            "latency_ms": 849.6,
            "usd": 0.000385,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[66.07, 16.97, 16.6, 479]",
          "compliant": true,
          "score": 0.305,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 66.07,
                "expected": 64.3,
                "score": 0.41
              },
              {
                "index": 1,
                "predicted": 16.97,
                "expected": 16.3,
                "score": 0.7767
              },
              {
                "index": 2,
                "predicted": 16.6,
                "expected": 13.7,
                "score": 0.0333
              },
              {
                "index": 3,
                "predicted": 479,
                "expected": 446,
                "score": 0.0
              }
            ],
            "tolerance": 3.0
          },
          "metrics": {
            "input_tokens": 890,
            "output_tokens": 18,
            "latency_ms": 882.1,
            "usd": 0.000385,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[65.4, 15.9, 14.9, 536]",
          "compliant": true,
          "score": 0.525,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 65.4,
                "expected": 64.3,
                "score": 0.6333
              },
              {
                "index": 1,
                "predicted": 15.9,
                "expected": 16.3,
                "score": 0.8667
              },
              {
                "index": 2,
                "predicted": 14.9,
                "expected": 13.7,
                "score": 0.6
              },
              {
                "index": 3,
                "predicted": 536,
                "expected": 446,
                "score": 0.0
              }
            ],
            "tolerance": 3.0
          },
          "metrics": {
            "input_tokens": 890,
            "output_tokens": 18,
            "latency_ms": 796.8,
            "usd": 0.000385,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3793,
      "score_summary": {
        "mean": 0.3793,
        "stdev": 0.1262,
        "sem": 0.0729,
        "ci95_lo": 0.2365,
        "ci95_hi": 0.5221,
        "n": 3
      },
      "score_if_valid": 0.3793,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 890.0,
        "output_tokens_mean": 18.0,
        "latency_ms_mean": 842.833,
        "usd_sum": 0.001155
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-006",
      "category": "nutrition",
      "capability": "energy_calculation",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "1ba6175ed99ad486",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nA mocktail uses 180 mL fresh orange juice (approx 9 g carbohydrate per 100 g) plus 12 g granulated sugar. Return kcal as a single integer.\n\nINPUT:\n{\n  \"oj_ml\": 180,\n  \"sugar_g\": 12\n}\n\nANSWER FORMAT: numeric:integer kcal",
      "expected": {
        "value": 111,
        "tolerance": 6
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "89",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 89.0,
            "expected": 111.0,
            "tolerance": 6.0
          },
          "metrics": {
            "input_tokens": 809,
            "output_tokens": 1,
            "latency_ms": 1128.0,
            "usd": 0.000325,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "164 kcal",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 164.0,
            "expected": 111.0,
            "tolerance": 6.0
          },
          "metrics": {
            "input_tokens": 809,
            "output_tokens": 2,
            "latency_ms": 1215.4,
            "usd": 0.000327,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "72",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 72.0,
            "expected": 111.0,
            "tolerance": 6.0
          },
          "metrics": {
            "input_tokens": 809,
            "output_tokens": 1,
            "latency_ms": 995.3,
            "usd": 0.000325,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 809.0,
        "output_tokens_mean": 1.333,
        "latency_ms_mean": 1112.9,
        "usd_sum": 0.000977
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-007",
      "category": "nutrition",
      "capability": "fibre_rule",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "b9e5625ed0828c94",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nWhat kcal/g factor does the OGS nutrition reference assign to dietary fibre? Respond with a single number.\n\nANSWER FORMAT: numeric:decimal kcal/g",
      "expected": {
        "value": 2,
        "tolerance": 0.1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "numeric:2",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 2.0,
            "expected": 2.0,
            "tolerance": 0.1
          },
          "metrics": {
            "input_tokens": 773,
            "output_tokens": 3,
            "latency_ms": 1183.5,
            "usd": 0.000314,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "numeric:2",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 2.0,
            "expected": 2.0,
            "tolerance": 0.1
          },
          "metrics": {
            "input_tokens": 773,
            "output_tokens": 3,
            "latency_ms": 955.3,
            "usd": 0.000314,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "numeric:2",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 2.0,
            "expected": 2.0,
            "tolerance": 0.1
          },
          "metrics": {
            "input_tokens": 773,
            "output_tokens": 3,
            "latency_ms": 1008.8,
            "usd": 0.000314,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 773.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 1049.2,
        "usd_sum": 0.000942
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-008",
      "category": "nutrition",
      "capability": "kj_conversion",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "95ef839d58e96bbc",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nConvert 250 kcal to kJ using the canonical OGS/EU conversion factor. Respond with a single integer.\n\nINPUT:\n{\n  \"kcal\": 250\n}\n\nANSWER FORMAT: numeric:integer kJ",
      "expected": {
        "value": 1046,
        "tolerance": 10
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "1046",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 1046.0,
            "expected": 1046.0,
            "tolerance": 10.0
          },
          "metrics": {
            "input_tokens": 787,
            "output_tokens": 2,
            "latency_ms": 1143.3,
            "usd": 0.000318,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "1046 kJ",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 1046.0,
            "expected": 1046.0,
            "tolerance": 10.0
          },
          "metrics": {
            "input_tokens": 787,
            "output_tokens": 4,
            "latency_ms": 1785.2,
            "usd": 0.000321,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "1046 kJ",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 1046.0,
            "expected": 1046.0,
            "tolerance": 10.0
          },
          "metrics": {
            "input_tokens": 787,
            "output_tokens": 4,
            "latency_ms": 622.2,
            "usd": 0.000321,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 787.0,
        "output_tokens_mean": 3.333,
        "latency_ms_mean": 1183.567,
        "usd_sum": 0.00096
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-009",
      "category": "nutrition",
      "capability": "macro_vector",
      "difficulty": "hard",
      "cuisine_region": "italian",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "641416c8b33c6dbc",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nA simple Alfredo sauce is 100 g butter + 150 g heavy cream (36% fat: 2.8 C / 2 P / 37 F) + 60 g parmesan (3.2 C / 35 P / 29 F), reduced to 250 g finished weight. Return [carb_per100g, protein_per100g, fat_per100g, kcal_per100g] as a JSON array of four numbers.\n\nINPUT:\n{\n  \"butter_g\": 100,\n  \"cream_g\": 150,\n  \"parmesan_g\": 60,\n  \"finished_g\": 250\n}\n\nANSWER FORMAT: json:array [carb, protein, fat, kcal] per 100 g",
      "expected": {
        "values": [
          2.5,
          9.6,
          55.2,
          545
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[4.08, 15.7, 35.8, 407]",
          "compliant": true,
          "score": 0.1512,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 4.08,
                "expected": 2.5,
                "score": 0.605
              },
              {
                "index": 1,
                "predicted": 15.7,
                "expected": 9.6,
                "score": 0.0
              },
              {
                "index": 2,
                "predicted": 35.8,
                "expected": 55.2,
                "score": 0.0
              },
              {
                "index": 3,
                "predicted": 407,
                "expected": 545,
                "score": 0.0
              }
            ],
            "tolerance": 4.0
          },
          "metrics": {
            "input_tokens": 895,
            "output_tokens": 18,
            "latency_ms": 944.9,
            "usd": 0.000387,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[6.36,12.02,38.94,438]",
          "compliant": true,
          "score": 0.1075,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 6.36,
                "expected": 2.5,
                "score": 0.035
              },
              {
                "index": 1,
                "predicted": 12.02,
                "expected": 9.6,
                "score": 0.395
              },
              {
                "index": 2,
                "predicted": 38.94,
                "expected": 55.2,
                "score": 0.0
              },
              {
                "index": 3,
                "predicted": 438,
                "expected": 545,
                "score": 0.0
              }
            ],
            "tolerance": 4.0
          },
          "metrics": {
            "input_tokens": 895,
            "output_tokens": 15,
            "latency_ms": 715.2,
            "usd": 0.000382,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[3.64, 18.6, 43.02, 485]",
          "compliant": true,
          "score": 0.1787,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 3.64,
                "expected": 2.5,
                "score": 0.715
              },
              {
                "index": 1,
                "predicted": 18.6,
                "expected": 9.6,
                "score": 0.0
              },
              {
                "index": 2,
                "predicted": 43.02,
                "expected": 55.2,
                "score": 0.0
              },
              {
                "index": 3,
                "predicted": 485,
                "expected": 545,
                "score": 0.0
              }
            ],
            "tolerance": 4.0
          },
          "metrics": {
            "input_tokens": 895,
            "output_tokens": 18,
            "latency_ms": 782.6,
            "usd": 0.000387,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.1458,
      "score_summary": {
        "mean": 0.1458,
        "stdev": 0.0359,
        "sem": 0.0207,
        "ci95_lo": 0.1052,
        "ci95_hi": 0.1864,
        "n": 3
      },
      "score_if_valid": 0.1458,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 895.0,
        "output_tokens_mean": 17.0,
        "latency_ms_mean": 814.233,
        "usd_sum": 0.001156
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:nut-010",
      "category": "nutrition",
      "capability": "numeric_compare",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.nutrition"
      ],
      "prompt_template_hash": "fd7edc20921469ff",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS nutrition reference (per-100 g macro densities)\n\nMacronutrient energy values (EU Regulation 1169/2011 Annex XIV):\n- carbohydrate: **4 kcal/g** (17 kJ/g)\n- protein: **4 kcal/g** (17 kJ/g)\n- fat: **9 kcal/g** (37 kJ/g)\n- alcohol: **7 kcal/g** (29 kJ/g)\n- fibre: **2 kcal/g** (8 kJ/g)\n\nExpress nutrition values per 100 g of the finished component unless explicitly stated otherwise. Round kcal to the nearest integer.\n\nCommon ingredient macros (per 100 g, approximate, for reasoning only):\n- olive oil: 0 C / 0 P / 100 F → 884 kcal\n- butter: 0.1 C / 0.9 P / 81 F → 717 kcal\n- chicken breast (raw, skinless): 0 C / 23 P / 1.2 F → 110 kcal\n- salmon (raw, Atlantic): 0 C / 20 P / 13 F → 208 kcal\n- dry pasta: 75 C / 13 P / 1.5 F → 371 kcal\n- cooked white rice: 28 C / 2.7 P / 0.3 F → 130 kcal\n- granulated sugar: 100 C / 0 P / 0 F → 387 kcal\n- all-purpose flour: 76 C / 10 P / 1 F → 364 kcal\n- whole milk: 4.8 C / 3.4 P / 3.3 F → 61 kcal\n- heavy cream (36%): 2.8 C / 2 P / 37 F → 345 kcal\n- eggs (whole, raw): 1.1 C / 12.6 P / 9.5 F → 143 kcal\n- parmesan: 3.2 C / 35 P / 29 F → 420 kcal\n- tomato (raw): 3.9 C / 0.9 P / 0.2 F → 18 kcal\n- onion (raw): 9 C / 1.1 P / 0.1 F → 40 kcal\n\nWhen summing nutrition across a recipe, scale each ingredient by its mass in grams, then divide by total finished mass to get per-100 g values. Assume no mass loss from evaporation unless the task says otherwise.\n\n---\n\nFrom the set {olive oil, butter, granulated sugar, dry pasta}, return the ingredient with the highest kcal per gram. Respond with one short name.\n\nINPUT:\n{\n  \"set\": [\n    \"olive oil\",\n    \"butter\",\n    \"granulated sugar\",\n    \"dry pasta\"\n  ]\n}\n\nANSWER FORMAT: exact:string",
      "expected": {
        "value": "olive oil",
        "aliases": [
          "olive-oil"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:olive oil",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:olive oil",
            "expected": "olive oil",
            "aliases": [
              "olive-oil"
            ]
          },
          "metrics": {
            "input_tokens": 813,
            "output_tokens": 4,
            "latency_ms": 819.3,
            "usd": 0.000332,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:olive oil",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:olive oil",
            "expected": "olive oil",
            "aliases": [
              "olive-oil"
            ]
          },
          "metrics": {
            "input_tokens": 813,
            "output_tokens": 4,
            "latency_ms": 466.9,
            "usd": 0.000332,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:olive oil",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:olive oil",
            "expected": "olive oil",
            "aliases": [
              "olive-oil"
            ]
          },
          "metrics": {
            "input_tokens": 813,
            "output_tokens": 4,
            "latency_ms": 516.8,
            "usd": 0.000332,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 813.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 601.0,
        "usd_sum": 0.000996
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-001",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "easy",
      "cuisine_region": "american",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "3fa6a67d5bfe7f1a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage option for the dish below from the candidates. Respond with just the candidate id (e.g. 'a').\n\nINPUT:\n{\n  \"dish\": \"60-day dry-aged ribeye, medium-rare, served with peppercorn jus\",\n  \"candidates\": {\n    \"a\": \"young Barolo DOCG (high tannin, high acidity, full body)\",\n    \"b\": \"off-dry German Kabinett Riesling\",\n    \"c\": \"unoaked Muscadet sur lie\",\n    \"d\": \"sweet Pedro Ximenez sherry\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "b",
          "d",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 378,
            "output_tokens": 3,
            "latency_ms": 607.4,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 378,
            "output_tokens": 3,
            "latency_ms": 594.6,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 378,
            "output_tokens": 3,
            "latency_ms": 1256.7,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 378.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 819.567,
        "usd_sum": 0.000468
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-002",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "4b85899cd6206acb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below from the candidates. Respond with just the candidate id.\n\nINPUT:\n{\n  \"dish\": \"fresh Kumamoto oysters on the half shell, lemon and mignonette\",\n  \"candidates\": {\n    \"a\": \"oaked California Chardonnay (14.5% ABV, buttery)\",\n    \"b\": \"Muscadet Sevre-et-Maine sur lie\",\n    \"c\": \"Amarone della Valpolicella\",\n    \"d\": \"bourbon old fashioned\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "b",
        "ranking": [
          "b",
          "a",
          "d",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 365,
            "output_tokens": 1,
            "latency_ms": 590.3,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 365,
            "output_tokens": 3,
            "latency_ms": 568.8,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 365,
            "output_tokens": 3,
            "latency_ms": 562.0,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 365.0,
        "output_tokens_mean": 2.333,
        "latency_ms_mean": 573.7,
        "usd_sum": 0.00045
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-003",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "mexican",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "41ed3ee840823502",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below. Respond with just the candidate id.\n\nINPUT:\n{\n  \"dish\": \"classic Chengdu-style mapo tofu (heavy doubanjiang, Sichuan peppercorn, ground pork, high chilli heat)\",\n  \"candidates\": {\n    \"a\": \"15% ABV Zinfandel\",\n    \"b\": \"off-dry Riesling Kabinett (~10% ABV)\",\n    \"c\": \"Islay single malt Scotch (Lagavulin 16)\",\n    \"d\": \"Brunello di Montalcino Riserva\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "b",
        "ranking": [
          "b",
          "d",
          "a",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 378,
            "output_tokens": 3,
            "latency_ms": 650.8,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 378,
            "output_tokens": 3,
            "latency_ms": 501.3,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 378,
            "output_tokens": 3,
            "latency_ms": 454.6,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 378.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 535.567,
        "usd_sum": 0.000468
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-004",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "82a4b41d0e3d5668",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below. Respond with just the candidate id.\n\nINPUT:\n{\n  \"dish\": \"warm goat cheese crouton on mesclun with walnut vinaigrette\",\n  \"candidates\": {\n    \"a\": \"Sancerre (Sauvignon Blanc, Loire)\",\n    \"b\": \"Napa Cabernet Sauvignon Reserve\",\n    \"c\": \"Vintage Port\",\n    \"d\": \"lagered pilsner\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "d",
          "b",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 573.3,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 457.9,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 667.3,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 349.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 566.167,
        "usd_sum": 0.000432
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-005",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "441705237fe65e54",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below. Respond with just the candidate id.\n\nINPUT:\n{\n  \"dish\": \"warm dark chocolate fondant (70% cacao), molten centre, vanilla ice cream\",\n  \"candidates\": {\n    \"a\": \"dry Sancerre Sauvignon Blanc\",\n    \"b\": \"vintage Port\",\n    \"c\": \"bone-dry Brut Nature Champagne\",\n    \"d\": \"Muscadet Sevre-et-Maine\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "b",
        "ranking": [
          "b",
          "c",
          "a",
          "d"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 352,
            "output_tokens": 3,
            "latency_ms": 638.3,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 352,
            "output_tokens": 3,
            "latency_ms": 519.3,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:b",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "b",
            "expected_best": "b",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 352,
            "output_tokens": 3,
            "latency_ms": 661.2,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 352.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 606.267,
        "usd_sum": 0.000438
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-006",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "b195ee1dfe7968ab",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage to pair across a premium edomae sushi omakase. Respond with just the candidate id.\n\nINPUT:\n{\n  \"dish\": \"premium edomae sushi omakase (akami, toro, kohada, ikura, uni, anago)\",\n  \"candidates\": {\n    \"a\": \"Junmai Daiginjo sake (well-chilled)\",\n    \"b\": \"heavily oaked California Chardonnay\",\n    \"c\": \"Barolo Riserva\",\n    \"d\": \"Islay single malt\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "b",
          "d",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 370,
            "output_tokens": 3,
            "latency_ms": 636.0,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 370,
            "output_tokens": 3,
            "latency_ms": 573.0,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 370,
            "output_tokens": 3,
            "latency_ms": 606.9,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 370.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 605.3,
        "usd_sum": 0.000459
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-007",
      "category": "pairing",
      "capability": "regional_pairing",
      "difficulty": "hard",
      "cuisine_region": "spanish",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "81bf14f26f4fa761",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for a classic paella valenciana (rabbit, chicken, garrofó beans, saffron). Respond with just the candidate id.\n\nINPUT:\n{\n  \"dish\": \"authentic paella valenciana\",\n  \"candidates\": {\n    \"a\": \"young Rioja crianza (Tempranillo blend, light oak)\",\n    \"b\": \"Amarone della Valpolicella Riserva\",\n    \"c\": \"Islay peated single malt\",\n    \"d\": \"off-dry Moscato d'Asti\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "b",
          "d",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 3,
            "latency_ms": 6327.4,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 3,
            "latency_ms": 686.6,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 3,
            "latency_ms": 713.5,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 371.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 2575.833,
        "usd_sum": 0.000459
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-008",
      "category": "pairing",
      "capability": "pairing_grade_reasoning",
      "difficulty": "hard",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.grade_scale"
      ],
      "prompt_template_hash": "51ad14586a063779",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n---\n\nA pairing's overall_score is computed as 68. Using the OGS grade scale, return the letter grade (ogs-match.md §5). Respond with just the grade token.\n\nINPUT:\n{\n  \"overall_score\": 68\n}\n\nANSWER FORMAT: one_of:A+,A,B+,B,C+,C,D,F",
      "expected": {
        "value": "D"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "D",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "D",
            "expected": "D",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 384,
            "output_tokens": 1,
            "latency_ms": 952.7,
            "usd": 0.000155,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "D",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "D",
            "expected": "D",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 384,
            "output_tokens": 1,
            "latency_ms": 640.4,
            "usd": 0.000155,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "D",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "D",
            "expected": "D",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 384,
            "output_tokens": 1,
            "latency_ms": 770.1,
            "usd": 0.000155,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 384.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 787.733,
        "usd_sum": 0.000465
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-009",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "easy",
      "cuisine_region": "italian",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "d7ade64ce48bfbe4",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below from the candidates. Respond with just the candidate id (e.g. 'a').\n\nINPUT:\n{\n  \"dish\": \"classic Neapolitan pizza Margherita (tomato, buffalo mozzarella, basil)\",\n  \"candidates\": {\n    \"a\": \"bright Chianti Classico (Sangiovese, high acid, medium tannin)\",\n    \"b\": \"heavily oaked California Chardonnay\",\n    \"c\": \"sweet ice wine\",\n    \"d\": \"Islay peated whisky\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "b",
          "d",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 3,
            "latency_ms": 387.2,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 3,
            "latency_ms": 643.4,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 3,
            "latency_ms": 513.4,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 371.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 514.667,
        "usd_sum": 0.000459
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-010",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "chinese",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "637717fdf3577204",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below.\n\nINPUT:\n{\n  \"dish\": \"mapo tofu with doubanjiang and Sichuan peppercorn, medium-hot\",\n  \"candidates\": {\n    \"a\": \"off-dry German Riesling Spätlese\",\n    \"b\": \"high-alcohol (15%) Zinfandel\",\n    \"c\": \"bone-dry Fino sherry\",\n    \"d\": \"straight rye whiskey\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "c",
          "d",
          "b"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 350,
            "output_tokens": 3,
            "latency_ms": 663.5,
            "usd": 0.000145,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 350,
            "output_tokens": 3,
            "latency_ms": 627.1,
            "usd": 0.000145,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 350,
            "output_tokens": 3,
            "latency_ms": 399.7,
            "usd": 0.000145,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 350.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 563.433,
        "usd_sum": 0.000435
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-011",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "indian",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "2b655f8b1ea64df8",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish below.\n\nINPUT:\n{\n  \"dish\": \"butter chicken (murgh makhani) in rich tomato-cream sauce with fenugreek\",\n  \"candidates\": {\n    \"a\": \"off-dry Gewurztraminer\",\n    \"b\": \"young tannic Cabernet Sauvignon\",\n    \"c\": \"unoaked Muscadet\",\n    \"d\": \"Islay single malt\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "c",
          "b",
          "d"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 346,
            "output_tokens": 3,
            "latency_ms": 661.4,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 346,
            "output_tokens": 3,
            "latency_ms": 588.1,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 346,
            "output_tokens": 3,
            "latency_ms": 471.9,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 346.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 573.8,
        "usd_sum": 0.000429
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-012",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "ethiopian",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "3fe344c62017694c",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for the dish.\n\nINPUT:\n{\n  \"dish\": \"doro wat: chicken stewed in berbere-spiced ghee, served with injera\",\n  \"candidates\": {\n    \"a\": \"juicy, low-tannin Beaujolais (Gamay) served slightly chilled\",\n    \"b\": \"high-alcohol Amarone\",\n    \"c\": \"bone-dry sparkling Prosecco Brut\",\n    \"d\": \"heavily peated Islay whisky\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "c",
          "b",
          "d"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 361,
            "output_tokens": 3,
            "latency_ms": 564.1,
            "usd": 0.000149,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 361,
            "output_tokens": 3,
            "latency_ms": 496.0,
            "usd": 0.000149,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 361,
            "output_tokens": 3,
            "latency_ms": 522.9,
            "usd": 0.000149,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 361.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 527.667,
        "usd_sum": 0.000447
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-013",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "argentinian",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "4d30c6e439c9c85f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage.\n\nINPUT:\n{\n  \"dish\": \"grilled ribeye over wood embers, served with chimichurri\",\n  \"candidates\": {\n    \"a\": \"old-vine Mendoza Malbec, medium-plus tannin, black fruit\",\n    \"b\": \"unoaked Chablis\",\n    \"c\": \"off-dry Moscato\",\n    \"d\": \"dry Manzanilla sherry\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "d",
          "b",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 3,
            "latency_ms": 556.0,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 3,
            "latency_ms": 454.9,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 3,
            "latency_ms": 590.5,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 347.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 533.8,
        "usd_sum": 0.000432
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-014",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "hard",
      "cuisine_region": "japanese",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "848f37f91777acfb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for otoro nigiri topped with a brush of soy.\n\nINPUT:\n{\n  \"dish\": \"bluefin tuna otoro nigiri, brushed soy\",\n  \"candidates\": {\n    \"a\": \"junmai daiginjo sake, chilled\",\n    \"b\": \"young Chinon Cabernet Franc\",\n    \"c\": \"dry Riesling Kabinett\",\n    \"d\": \"sweet Sauternes\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "c",
          "b",
          "d"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 346,
            "output_tokens": 3,
            "latency_ms": 583.6,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 346,
            "output_tokens": 3,
            "latency_ms": 625.4,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 346,
            "output_tokens": 3,
            "latency_ms": 397.3,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 346.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 535.433,
        "usd_sum": 0.000429
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-015",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "hard",
      "cuisine_region": "moroccan",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "f5ecf9dcd59f0d0a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage.\n\nINPUT:\n{\n  \"dish\": \"slow-cooked lamb shoulder tagine with preserved lemon and green olives\",\n  \"candidates\": {\n    \"a\": \"Southern Rhône Grenache-Syrah blend (Gigondas), moderate tannin\",\n    \"b\": \"unoaked Muscadet\",\n    \"c\": \"Champagne Blanc de Blancs\",\n    \"d\": \"young high-tannin Barolo\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "d",
          "c",
          "b"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 3,
            "latency_ms": 410.4,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 3,
            "latency_ms": 615.1,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 3,
            "latency_ms": 464.4,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 347.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 496.633,
        "usd_sum": 0.000432
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-016",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "vietnamese",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "3dd7c0170877ac9b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for a bowl of Vietnamese beef pho.\n\nINPUT:\n{\n  \"dish\": \"pho bo: beef broth with star anise, cinnamon, rice noodles, rare beef, basil, lime\",\n  \"candidates\": {\n    \"a\": \"cold Vietnamese lager-style beer (e.g. Saigon or 333)\",\n    \"b\": \"tannic Cabernet Sauvignon\",\n    \"c\": \"heavily oaked Chardonnay\",\n    \"d\": \"sweet Tokaji Aszú 5 puttonyos\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "b",
          "c",
          "d"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 366,
            "output_tokens": 3,
            "latency_ms": 487.2,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 366,
            "output_tokens": 3,
            "latency_ms": 635.7,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 366,
            "output_tokens": 3,
            "latency_ms": 564.4,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 366.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 562.433,
        "usd_sum": 0.000453
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-017",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "8f60336584f33baf",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for 24-month aged Comté served on its own.\n\nINPUT:\n{\n  \"dish\": \"24-month aged Comté, served at room temperature\",\n  \"candidates\": {\n    \"a\": \"Jura Vin Jaune (Savagnin, oxidative, nutty)\",\n    \"b\": \"young oaky Cabernet Sauvignon\",\n    \"c\": \"dry Muscadet\",\n    \"d\": \"Sauternes\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "d",
          "b",
          "c"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 571.9,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 607.2,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 651.0,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 349.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 610.033,
        "usd_sum": 0.000432
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-018",
      "category": "pairing",
      "capability": "regional_pairing",
      "difficulty": "medium",
      "cuisine_region": "thai",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "9209873bc05779b7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage for a fragrant, moderately spicy Thai green curry with chicken and Thai basil.\n\nINPUT:\n{\n  \"dish\": \"Thai green curry with chicken, Thai basil, kaffir lime\",\n  \"candidates\": {\n    \"a\": \"off-dry New Zealand Sauvignon Blanc with residual sugar\",\n    \"b\": \"heavy Port wine\",\n    \"c\": \"young tannic Bordeaux\",\n    \"d\": \"Islay peated whisky\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "c",
          "b",
          "d"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 350,
            "output_tokens": 3,
            "latency_ms": 542.2,
            "usd": 0.000145,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 350,
            "output_tokens": 3,
            "latency_ms": 412.6,
            "usd": 0.000145,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 350,
            "output_tokens": 3,
            "latency_ms": 671.9,
            "usd": 0.000145,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 350.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 542.233,
        "usd_sum": 0.000435
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-019",
      "category": "pairing",
      "capability": "best_beverage_selection",
      "difficulty": "medium",
      "cuisine_region": "mexican",
      "scorer": "ranked_selection",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "da735293a9d3102b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nPick the single best beverage.\n\nINPUT:\n{\n  \"dish\": \"Oaxacan mole negro enchiladas (rich chocolate-chile-spice sauce over chicken)\",\n  \"candidates\": {\n    \"a\": \"medium-tannin Garnacha from Aragón\",\n    \"b\": \"unoaked Muscadet\",\n    \"c\": \"very sweet late-harvest Riesling\",\n    \"d\": \"bone-dry Brut Champagne\"\n  }\n}\n\nANSWER FORMAT: one_of:a,b,c,d",
      "expected": {
        "value": "a",
        "ranking": [
          "a",
          "d",
          "c",
          "b"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 345,
            "output_tokens": 3,
            "latency_ms": 585.9,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 345,
            "output_tokens": 1,
            "latency_ms": 455.6,
            "usd": 0.00014,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "one_of:a",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a",
            "expected_best": "a",
            "rank": 0
          },
          "metrics": {
            "input_tokens": 345,
            "output_tokens": 3,
            "latency_ms": 420.7,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 345.0,
        "output_tokens_mean": 2.333,
        "latency_ms_mean": 487.4,
        "usd_sum": 0.000426
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:pair-020",
      "category": "pairing",
      "capability": "pairing_grade_reasoning",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.grade_scale"
      ],
      "prompt_template_hash": "bac251ebb1c064ef",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n---\n\nA pairing's overall_score is computed as 96. Using the OGS grade scale, return the letter grade.\n\nINPUT:\n{\n  \"overall_score\": 96\n}\n\nANSWER FORMAT: one_of:A+,A,B+,B,C+,C,D,F",
      "expected": {
        "value": "A+"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "A+",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a+",
            "expected": "a+",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 2,
            "latency_ms": 459.9,
            "usd": 0.000152,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "A+",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a+",
            "expected": "a+",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 2,
            "latency_ms": 513.2,
            "usd": 0.000152,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "A+",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "a+",
            "expected": "a+",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 2,
            "latency_ms": 751.7,
            "usd": 0.000152,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 371.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 574.933,
        "usd_sum": 0.000456
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-001",
      "category": "regional",
      "capability": "wine_region_knowledge",
      "difficulty": "easy",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "1df867dd3a55af5b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhat is the ONLY grape variety permitted in Barolo DOCG? Respond with just the variety name.\n\nANSWER FORMAT: string",
      "expected": {
        "value": "nebbiolo"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Nebbiolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "nebbiolo",
            "expected": "nebbiolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 246,
            "output_tokens": 3,
            "latency_ms": 910.3,
            "usd": 0.000103,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Nebbiolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "nebbiolo",
            "expected": "nebbiolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 246,
            "output_tokens": 3,
            "latency_ms": 789.5,
            "usd": 0.000103,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "nebbiolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "nebbiolo",
            "expected": "nebbiolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 246,
            "output_tokens": 3,
            "latency_ms": 606.8,
            "usd": 0.000103,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 246.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 768.867,
        "usd_sum": 0.000309
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-002",
      "category": "regional",
      "capability": "wine_region_knowledge",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "24a95f8a1c0a0a45",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhat single grape variety is used for Chablis AOC? Respond with just the variety name.\n\nANSWER FORMAT: string",
      "expected": {
        "value": "chardonnay"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "chardonnay",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "chardonnay",
            "expected": "chardonnay",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 246,
            "output_tokens": 2,
            "latency_ms": 774.6,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "chardonnay",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "chardonnay",
            "expected": "chardonnay",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 246,
            "output_tokens": 2,
            "latency_ms": 432.4,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "chardonnay",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "chardonnay",
            "expected": "chardonnay",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 246,
            "output_tokens": 2,
            "latency_ms": 577.5,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 246.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 594.833,
        "usd_sum": 0.000306
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-003",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "easy",
      "cuisine_region": "vietnamese",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "15d36148f8b853e6",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhich country is phở originally from? Respond with just the country name.\n\nANSWER FORMAT: string",
      "expected": {
        "value": "vietnam"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Vietnam",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "vietnam",
            "expected": "vietnam",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 241,
            "output_tokens": 1,
            "latency_ms": 555.0,
            "usd": 9.8e-05,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Vietnam",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "vietnam",
            "expected": "vietnam",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 241,
            "output_tokens": 1,
            "latency_ms": 607.6,
            "usd": 9.8e-05,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Vietnam",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "vietnam",
            "expected": "vietnam",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 241,
            "output_tokens": 1,
            "latency_ms": 553.6,
            "usd": 9.8e-05,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 241.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 572.067,
        "usd_sum": 0.000294
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-004",
      "category": "regional",
      "capability": "classic_regional_pairing",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "e332af036a2bd591",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nName the classic Alsace wine traditionally paired with ripe, stinky Muenster cheese. One word, grape variety only.\n\nANSWER FORMAT: string",
      "expected": {
        "value": "gewurztraminer"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Gewurztraminer",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "gewurztraminer",
            "expected": "gewurztraminer",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 251,
            "output_tokens": 4,
            "latency_ms": 548.1,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Gewurztraminer",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "gewurztraminer",
            "expected": "gewurztraminer",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 251,
            "output_tokens": 4,
            "latency_ms": 486.9,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Gewurztraminer",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "gewurztraminer",
            "expected": "gewurztraminer",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 251,
            "output_tokens": 4,
            "latency_ms": 955.3,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 251.0,
        "output_tokens_mean": 4.0,
        "latency_ms_mean": 663.433,
        "usd_sum": 0.000321
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-005",
      "category": "regional",
      "capability": "french_classical_cuisine",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "4aa2c63ffdeeacfb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nName the French mother sauce that is made from a brown roux, brown stock and mirepoix, traditionally enriched with tomato. One word, lowercase.\n\nANSWER FORMAT: string",
      "expected": {
        "value": "espagnole"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "espagnole",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "espagnole",
            "expected": "espagnole",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 3,
            "latency_ms": 753.0,
            "usd": 0.000108,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "espagnole",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "espagnole",
            "expected": "espagnole",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 3,
            "latency_ms": 900.4,
            "usd": 0.000108,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "espagnole",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "espagnole",
            "expected": "espagnole",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 3,
            "latency_ms": 531.1,
            "usd": 0.000108,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 257.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 728.167,
        "usd_sum": 0.000324
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-006",
      "category": "regional",
      "capability": "wine_region_knowledge",
      "difficulty": "medium",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "979e7579b34d9797",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhat is the name of the official Rioja classification for a red wine aged at least 2 years (1 in oak) and 3+ years total? Respond with just the term.\n\nINPUT:\n{\n  \"question\": \"official Rioja classification for red wine aged 2+ years (1+ in oak), 3+ total\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "reserva"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Crianza",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "crianza",
            "expected": "reserva",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 295,
            "output_tokens": 3,
            "latency_ms": 889.0,
            "usd": 0.000123,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Crianza",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "crianza",
            "expected": "reserva",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 295,
            "output_tokens": 3,
            "latency_ms": 681.7,
            "usd": 0.000123,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Reserva",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "reserva",
            "expected": "reserva",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 295,
            "output_tokens": 1,
            "latency_ms": 600.7,
            "usd": 0.00012,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 295.0,
        "output_tokens_mean": 2.333,
        "latency_ms_mean": 723.8,
        "usd_sum": 0.000366
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-007",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "easy",
      "cuisine_region": "korean",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "23a69016b1082553",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nFrom which country does kimchi originate? Respond with just the country name in English.\n\nINPUT:\n{\n  \"dish\": \"kimchi\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "korea",
        "aliases": [
          "south korea",
          "republic of korea"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Korea",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "korea",
            "expected": "korea",
            "aliases": [
              "south korea",
              "republic of korea"
            ]
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 2,
            "latency_ms": 4064.4,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Korea",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "korea",
            "expected": "korea",
            "aliases": [
              "south korea",
              "republic of korea"
            ]
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 2,
            "latency_ms": 628.0,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Korea",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "korea",
            "expected": "korea",
            "aliases": [
              "south korea",
              "republic of korea"
            ]
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 2,
            "latency_ms": 562.0,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 256.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 1751.467,
        "usd_sum": 0.000318
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-008",
      "category": "regional",
      "capability": "wine_region_knowledge",
      "difficulty": "hard",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "0a12eb057eb17654",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhat is the single grape variety required for Barbaresco DOCG?\n\nINPUT:\n{\n  \"wine\": \"Barbaresco DOCG\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "nebbiolo"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "nebbiolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "nebbiolo",
            "expected": "nebbiolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 3,
            "latency_ms": 1032.2,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "nebbiolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "nebbiolo",
            "expected": "nebbiolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 3,
            "latency_ms": 560.8,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "nebbiolo",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "nebbiolo",
            "expected": "nebbiolo",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 3,
            "latency_ms": 937.0,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 256.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 843.333,
        "usd_sum": 0.000321
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-009",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "medium",
      "cuisine_region": "peruvian",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "767d178c3cce3333",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhich country is classic ceviche most commonly attributed to? Respond with the country name.\n\nINPUT:\n{\n  \"dish\": \"ceviche\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "peru"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Peru",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "peru",
            "expected": "peru",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 2,
            "latency_ms": 721.2,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Peru",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "peru",
            "expected": "peru",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 2,
            "latency_ms": 554.7,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Peru",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "peru",
            "expected": "peru",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 256,
            "output_tokens": 2,
            "latency_ms": 485.2,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 256.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 587.033,
        "usd_sum": 0.000318
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-010",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "easy",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "1001169bc92aaacd",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nIn which country does paella valenciana originate? Respond with the country name.\n\nINPUT:\n{\n  \"dish\": \"paella valenciana\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "spain"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Spain",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "spain",
            "expected": "spain",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 1,
            "latency_ms": 504.7,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Spain",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "spain",
            "expected": "spain",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 1,
            "latency_ms": 485.9,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Spain",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "spain",
            "expected": "spain",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 1,
            "latency_ms": 626.7,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 257.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 539.1,
        "usd_sum": 0.000312
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-011",
      "category": "regional",
      "capability": "classic_regional_pairing",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "571e5074e66e6a11",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nName the classic French white wine region traditionally paired with raw oysters. Respond with just the region name.\n\nINPUT:\n{\n  \"dish\": \"raw oysters on the half shell\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "chablis",
        "aliases": [
          "muscadet"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "muscadet",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "muscadet",
            "expected": "chablis",
            "aliases": [
              "muscadet"
            ]
          },
          "metrics": {
            "input_tokens": 263,
            "output_tokens": 3,
            "latency_ms": 536.1,
            "usd": 0.00011,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "muscadet",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "muscadet",
            "expected": "chablis",
            "aliases": [
              "muscadet"
            ]
          },
          "metrics": {
            "input_tokens": 263,
            "output_tokens": 3,
            "latency_ms": 489.3,
            "usd": 0.00011,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "muscadet",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "muscadet",
            "expected": "chablis",
            "aliases": [
              "muscadet"
            ]
          },
          "metrics": {
            "input_tokens": 263,
            "output_tokens": 3,
            "latency_ms": 426.1,
            "usd": 0.00011,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 263.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 483.833,
        "usd_sum": 0.00033
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-012",
      "category": "regional",
      "capability": "french_classical_cuisine",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "d9792be4d5ca7335",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nName the French mother sauce made from clarified butter, egg yolks, and lemon juice.\n\nINPUT:\n{\n  \"description\": \"emulsion of clarified butter, egg yolks, and lemon juice\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "hollandaise"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "holandaise",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "holandaise",
            "expected": "hollandaise",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 268,
            "output_tokens": 3,
            "latency_ms": 667.2,
            "usd": 0.000112,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Hollandaise",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "hollandaise",
            "expected": "hollandaise",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 268,
            "output_tokens": 4,
            "latency_ms": 538.1,
            "usd": 0.000114,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Hollandaise",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "hollandaise",
            "expected": "hollandaise",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 268,
            "output_tokens": 4,
            "latency_ms": 463.4,
            "usd": 0.000114,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 268.0,
        "output_tokens_mean": 3.667,
        "latency_ms_mean": 556.233,
        "usd_sum": 0.00034
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-013",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "c831a091be5025f9",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nIn which country did edomae-style sushi originate?\n\nINPUT:\n{\n  \"dish\": \"edomae sushi\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "japan"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "japan",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "japan",
            "expected": "japan",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 252,
            "output_tokens": 2,
            "latency_ms": 491.1,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Japan",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "japan",
            "expected": "japan",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 252,
            "output_tokens": 1,
            "latency_ms": 692.4,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Japan",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "japan",
            "expected": "japan",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 252,
            "output_tokens": 1,
            "latency_ms": 781.4,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 252.0,
        "output_tokens_mean": 1.333,
        "latency_ms_mean": 654.967,
        "usd_sum": 0.000308
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-014",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "medium",
      "cuisine_region": "moroccan",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "7d761cbfb86342af",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nIn which country does the slow-cooked dish tagine originate?\n\nINPUT:\n{\n  \"dish\": \"tagine (clay-pot slow-cooked stew)\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "morocco"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Morocco",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "morocco",
            "expected": "morocco",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 260,
            "output_tokens": 2,
            "latency_ms": 922.0,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Morocco",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "morocco",
            "expected": "morocco",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 260,
            "output_tokens": 2,
            "latency_ms": 571.5,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Morocco",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "morocco",
            "expected": "morocco",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 260,
            "output_tokens": 2,
            "latency_ms": 762.3,
            "usd": 0.000107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 260.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 751.933,
        "usd_sum": 0.000321
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-015",
      "category": "regional",
      "capability": "wine_region_knowledge",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "2f35d24bb9d58118",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhat single grape variety is used for white Sancerre AOC?\n\nINPUT:\n{\n  \"wine\": \"Sancerre AOC blanc\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "sauvignon blanc",
        "aliases": [
          "sauvignon_blanc",
          "sauvignon"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "SauvignonBlanc",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "sauvignonblanc",
            "expected": "sauvignon blanc",
            "aliases": [
              "sauvignon_blanc",
              "sauvignon"
            ]
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 5,
            "latency_ms": 700.8,
            "usd": 0.000111,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "sauvignon_blanc",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sauvignon_blanc",
            "expected": "sauvignon blanc",
            "aliases": [
              "sauvignon_blanc",
              "sauvignon"
            ]
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 5,
            "latency_ms": 522.5,
            "usd": 0.000111,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "sauvignon_blanc",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sauvignon_blanc",
            "expected": "sauvignon blanc",
            "aliases": [
              "sauvignon_blanc",
              "sauvignon"
            ]
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 5,
            "latency_ms": 656.4,
            "usd": 0.000111,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 257.0,
        "output_tokens_mean": 5.0,
        "latency_ms_mean": 626.567,
        "usd_sum": 0.000333
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-016",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "easy",
      "cuisine_region": "mexican",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "efb489c56faf463a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nIn which country does mole poblano originate?\n\nINPUT:\n{\n  \"dish\": \"mole poblano\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "mexico"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Mexico",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "mexico",
            "expected": "mexico",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 250,
            "output_tokens": 1,
            "latency_ms": 417.7,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "mexico",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "mexico",
            "expected": "mexico",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 250,
            "output_tokens": 2,
            "latency_ms": 604.8,
            "usd": 0.000103,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Mexico",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "mexico",
            "expected": "mexico",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 250,
            "output_tokens": 1,
            "latency_ms": 493.5,
            "usd": 0.000102,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 250.0,
        "output_tokens_mean": 1.333,
        "latency_ms_mean": 505.333,
        "usd_sum": 0.000307
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-017",
      "category": "regional",
      "capability": "classic_regional_pairing",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "53530e4634162afc",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nName the Alsatian white wine traditionally served with choucroute garnie.\n\nINPUT:\n{\n  \"dish\": \"choucroute garnie\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "riesling"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "riesling",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "riesling",
            "expected": "riesling",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 2,
            "latency_ms": 603.0,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "riesling",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "riesling",
            "expected": "riesling",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 2,
            "latency_ms": 750.8,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "riesling",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "riesling",
            "expected": "riesling",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 257,
            "output_tokens": 2,
            "latency_ms": 417.2,
            "usd": 0.000106,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 257.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 590.333,
        "usd_sum": 0.000318
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-018",
      "category": "regional",
      "capability": "dish_origin",
      "difficulty": "medium",
      "cuisine_region": "brazilian",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "28a636bdc82633de",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nWhich country claims feijoada completa as a national dish?\n\nINPUT:\n{\n  \"dish\": \"feijoada completa\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "brazil"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "brazil",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "brazil",
            "expected": "brazil",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 253,
            "output_tokens": 2,
            "latency_ms": 581.9,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "brazil",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "brazil",
            "expected": "brazil",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 253,
            "output_tokens": 2,
            "latency_ms": 359.6,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "brazil",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "brazil",
            "expected": "brazil",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 253,
            "output_tokens": 2,
            "latency_ms": 592.9,
            "usd": 0.000104,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 253.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 511.467,
        "usd_sum": 0.000312
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-019",
      "category": "regional",
      "capability": "wine_region_knowledge",
      "difficulty": "hard",
      "cuisine_region": "hungarian",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "2c5869ce479f65cc",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nTokaji Aszú's vineyards are centred in a single country. Name that country in English.\n\nINPUT:\n{\n  \"wine\": \"Tokaji Aszú\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "hungary"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "Hungary",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "hungary",
            "expected": "hungary",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 262,
            "output_tokens": 2,
            "latency_ms": 604.6,
            "usd": 0.000108,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "Hungary",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "hungary",
            "expected": "hungary",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 262,
            "output_tokens": 2,
            "latency_ms": 564.8,
            "usd": 0.000108,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "Hungary",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "hungary",
            "expected": "hungary",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 262,
            "output_tokens": 2,
            "latency_ms": 337.0,
            "usd": 0.000108,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 262.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 502.133,
        "usd_sum": 0.000324
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:region-020",
      "category": "regional",
      "capability": "classic_regional_pairing",
      "difficulty": "hard",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [],
      "prompt_template_hash": "aa3761bdbf2d490e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n---\n\nName the Galician white wine grape traditionally paired with pulpo á feira (boiled octopus with pimentón).\n\nINPUT:\n{\n  \"dish\": \"pulpo á feira — Galician boiled octopus with pimentón and olive oil\"\n}\n\nANSWER FORMAT: one_word",
      "expected": {
        "value": "albarino",
        "aliases": [
          "albariño"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "albariño",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "albariño",
            "expected": "albarino",
            "aliases": [
              "albariño"
            ]
          },
          "metrics": {
            "input_tokens": 278,
            "output_tokens": 3,
            "latency_ms": 677.2,
            "usd": 0.000116,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "albariño",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "albariño",
            "expected": "albarino",
            "aliases": [
              "albariño"
            ]
          },
          "metrics": {
            "input_tokens": 278,
            "output_tokens": 3,
            "latency_ms": 452.8,
            "usd": 0.000116,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "albariño",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "albariño",
            "expected": "albarino",
            "aliases": [
              "albariño"
            ]
          },
          "metrics": {
            "input_tokens": 278,
            "output_tokens": 3,
            "latency_ms": 472.6,
            "usd": 0.000116,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 278.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 534.2,
        "usd_sum": 0.000348
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-001",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "08845fcf7f8beb09",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"salmon and tuna nigiri\",\n  \"beverage\": \"young Bordeaux Left Bank (Cabernet Sauvignon, high tannin)\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_TANNIN_FISH"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_TANNIN_FISH",
            "expected": "RISK_TANNIN_FISH",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 637,
            "output_tokens": 7,
            "latency_ms": 495.2,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_TANNIN_FISH",
            "expected": "RISK_TANNIN_FISH",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 637,
            "output_tokens": 7,
            "latency_ms": 567.0,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_TANNIN_FISH",
            "expected": "RISK_TANNIN_FISH",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 637,
            "output_tokens": 7,
            "latency_ms": 503.5,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 637.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 521.9,
        "usd_sum": 0.000798
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-002",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "7a188b424515258f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"Thai green curry with bird's eye chilli, high heat\",\n  \"beverage\": \"15.5% ABV California Zinfandel\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_SPICE_ALCOHOL"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_SPICE_ALCOHOL",
            "expected": "RISK_SPICE_ALCOHOL",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 639,
            "output_tokens": 7,
            "latency_ms": 438.2,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_SPICE_ALCOHOL",
            "expected": "RISK_SPICE_ALCOHOL",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 639,
            "output_tokens": 7,
            "latency_ms": 676.1,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_SPICE_ALCOHOL",
            "expected": "RISK_SPICE_ALCOHOL",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 639,
            "output_tokens": 7,
            "latency_ms": 842.5,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 639.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 652.267,
        "usd_sum": 0.000801
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-003",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "a60fa6dd5c1ff452",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code for the pairing. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"delicate flounder sashimi with ponzu\",\n  \"beverage\": \"heavily new-oaked California Chardonnay, 14% ABV, buttery ML\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_OAK_MASKS"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_OAK_MASKS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_OAK_MASKS",
            "expected": "RISK_OAK_MASKS",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 642,
            "output_tokens": 6,
            "latency_ms": 532.6,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_OAK_MASKS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_OAK_MASKS",
            "expected": "RISK_OAK_MASKS",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 642,
            "output_tokens": 6,
            "latency_ms": 634.1,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_OAK_MASKS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_OAK_MASKS",
            "expected": "RISK_OAK_MASKS",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 642,
            "output_tokens": 6,
            "latency_ms": 432.8,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 642.0,
        "output_tokens_mean": 6.0,
        "latency_ms_mean": 533.167,
        "usd_sum": 0.000798
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-004",
      "category": "risk",
      "capability": "risk_severity",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "16286de7fea14ae1",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nUsing OGS risk severity levels (low/medium/high), classify the RISK_ALCOHOL_OVERPOWERS severity for the pairing below. Respond with just the severity token.\n\nINPUT:\n{\n  \"dish\": \"pan-seared halibut with beurre blanc, very delicate flavours\",\n  \"beverage\": \"15.5% ABV Amarone della Valpolicella\"\n}\n\nANSWER FORMAT: one_of:low,medium,high",
      "expected": {
        "value": "high"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "high",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "high",
            "expected": "high",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 604,
            "output_tokens": 1,
            "latency_ms": 434.5,
            "usd": 0.000243,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "high",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "high",
            "expected": "high",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 604,
            "output_tokens": 1,
            "latency_ms": 525.3,
            "usd": 0.000243,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "high",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "high",
            "expected": "high",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 604,
            "output_tokens": 1,
            "latency_ms": 598.7,
            "usd": 0.000243,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 604.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 519.5,
        "usd_sum": 0.000729
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-005",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "cd44f8ac361108e8",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code for the pairing. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"caramel panna cotta with honey\",\n  \"beverage\": \"bone-dry Brut Nature Champagne (zero dosage)\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_SWEETNESS_CONFLICT"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_SWEETNESS_CONFLICT",
            "expected": "RISK_SWEETNESS_CONFLICT",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 631,
            "output_tokens": 8,
            "latency_ms": 603.5,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_SWEETNESS_CONFLICT",
            "expected": "RISK_SWEETNESS_CONFLICT",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 631,
            "output_tokens": 8,
            "latency_ms": 488.1,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "RISK_SWEETNESS_CONFLICT",
            "expected": "RISK_SWEETNESS_CONFLICT",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 631,
            "output_tokens": 8,
            "latency_ms": 689.9,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 631.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 593.833,
        "usd_sum": 0.000795
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-006",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "easy",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "28cb271326a709c7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"raw tuna sashimi with a light soy-wasabi dip\",\n  \"beverage\": \"young tannic Napa Cabernet Sauvignon\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_TANNIN_FISH"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_tannin_fish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 637,
            "output_tokens": 7,
            "latency_ms": 817.8,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_tannin_fish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 637,
            "output_tokens": 7,
            "latency_ms": 677.2,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_tannin_fish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 637,
            "output_tokens": 7,
            "latency_ms": 553.5,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 637.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 682.833,
        "usd_sum": 0.000798
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-007",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "thai",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "93c46eac23eca249",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"spicy Thai green curry with chilies and coconut milk\",\n  \"beverage\": \"15.5% abv California Zinfandel\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_SPICE_ALCOHOL"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_spice_alcohol",
            "expected": "risk_spice_alcohol",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 640,
            "output_tokens": 7,
            "latency_ms": 626.4,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_spice_alcohol",
            "expected": "risk_spice_alcohol",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 640,
            "output_tokens": 7,
            "latency_ms": 648.8,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_spice_alcohol",
            "expected": "risk_spice_alcohol",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 640,
            "output_tokens": 7,
            "latency_ms": 476.1,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 640.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 583.767,
        "usd_sum": 0.000801
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-008",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "2409e606629eebf9",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"grilled escarole salad with bitter radicchio and walnuts\",\n  \"beverage\": \"bone-dry bitter amontillado sherry\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_BITTER_AMPLIFY"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_BITTER_AMPLIFY",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_bitter_amplify",
            "expected": "risk_bitter_amplify",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 642,
            "output_tokens": 7,
            "latency_ms": 794.6,
            "usd": 0.000268,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_BITTER_AMPLIFY",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_bitter_amplify",
            "expected": "risk_bitter_amplify",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 642,
            "output_tokens": 7,
            "latency_ms": 468.0,
            "usd": 0.000268,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_BITTER_AMPLIFY",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_bitter_amplify",
            "expected": "risk_bitter_amplify",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 642,
            "output_tokens": 7,
            "latency_ms": 490.4,
            "usd": 0.000268,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 642.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 584.333,
        "usd_sum": 0.000804
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-009",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "a00498fd7fce7cb7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"raw scallop crudo with yuzu\",\n  \"beverage\": \"heavily oaked California Chardonnay, new French barrels\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_OAK_MASKS"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_OAK_MASKS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_oak_masks",
            "expected": "risk_oak_masks",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 638,
            "output_tokens": 6,
            "latency_ms": 516.2,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_OAK_MASKS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_oak_masks",
            "expected": "risk_oak_masks",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 638,
            "output_tokens": 6,
            "latency_ms": 449.0,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_OAK_MASKS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_oak_masks",
            "expected": "risk_oak_masks",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 638,
            "output_tokens": 6,
            "latency_ms": 422.8,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 638.0,
        "output_tokens_mean": 6.0,
        "latency_ms_mean": 462.667,
        "usd_sum": 0.000795
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-010",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "b28af5fc40407ce0",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"warm clear beef consommé\",\n  \"beverage\": \"sweet late-harvest Moscato d'Asti\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_SWEETNESS_CONFLICT"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_sweetness_conflict",
            "expected": "risk_sweetness_conflict",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 634,
            "output_tokens": 8,
            "latency_ms": 703.4,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_sweetness_conflict",
            "expected": "risk_sweetness_conflict",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 634,
            "output_tokens": 8,
            "latency_ms": 1101.0,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_sweetness_conflict",
            "expected": "risk_sweetness_conflict",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 634,
            "output_tokens": 8,
            "latency_ms": 536.8,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 634.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 780.4,
        "usd_sum": 0.000798
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-011",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "fba6f85afa81595f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"frisée salad with aggressive Dijon vinaigrette (1:2 vinegar to oil)\",\n  \"beverage\": \"bone-dry Brut Nature Champagne\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_ACID_CLASH"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_ACID_CLASH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_acid_clash",
            "expected": "risk_acid_clash",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 641,
            "output_tokens": 6,
            "latency_ms": 574.3,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_ACID_CLASH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_acid_clash",
            "expected": "risk_acid_clash",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 641,
            "output_tokens": 6,
            "latency_ms": 560.1,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_ACID_CLASH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_acid_clash",
            "expected": "risk_acid_clash",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 641,
            "output_tokens": 6,
            "latency_ms": 545.0,
            "usd": 0.000266,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 641.0,
        "output_tokens_mean": 6.0,
        "latency_ms_mean": 559.8,
        "usd_sum": 0.000798
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-012",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "easy",
      "cuisine_region": "italian",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "2f3a4360a39afdc9",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"steamed turbot with beurre blanc\",\n  \"beverage\": \"young powerful Barolo\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_INTENSITY_MISMATCH"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_intensity_mismatch",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 630,
            "output_tokens": 7,
            "latency_ms": 556.4,
            "usd": 0.000263,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_intensity_mismatch",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 630,
            "output_tokens": 7,
            "latency_ms": 686.6,
            "usd": 0.000263,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_intensity_mismatch",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 630,
            "output_tokens": 7,
            "latency_ms": 534.9,
            "usd": 0.000263,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 630.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 592.633,
        "usd_sum": 0.000789
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-013",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "390e90cae060ada8",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"yellowtail sashimi\",\n  \"beverage\": \"overproof 63% abv Jamaican rum\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_ALCOHOL_OVERPOWERS"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_ALCOHOL_OVERPOWERS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_alcohol_overpowers",
            "expected": "risk_alcohol_overpowers",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 632,
            "output_tokens": 9,
            "latency_ms": 714.0,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_ALCOHOL_OVERPOWERS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_alcohol_overpowers",
            "expected": "risk_alcohol_overpowers",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 632,
            "output_tokens": 9,
            "latency_ms": 973.0,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_ALCOHOL_OVERPOWERS",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_alcohol_overpowers",
            "expected": "risk_alcohol_overpowers",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 632,
            "output_tokens": 9,
            "latency_ms": 710.6,
            "usd": 0.000267,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 632.0,
        "output_tokens_mean": 9.0,
        "latency_ms_mean": 799.2,
        "usd_sum": 0.000801
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-014",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "d83b8b2ecabe9907",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"gin-cured salmon gravlax with dill\",\n  \"beverage\": \"young tannic Chianti Classico\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_TANNIN_FISH"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_tannin_fish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 635,
            "output_tokens": 7,
            "latency_ms": 580.1,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_tannin_fish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 635,
            "output_tokens": 7,
            "latency_ms": 538.4,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_TANNIN_FISH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_tannin_fish",
            "expected": "risk_tannin_fish",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 635,
            "output_tokens": 7,
            "latency_ms": 969.9,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 635.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 696.133,
        "usd_sum": 0.000795
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-015",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "1e83097d483e0639",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"poached lobster with drawn butter\",\n  \"beverage\": \"aggressive 80-IBU West Coast IPA\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_BITTER_AMPLIFY"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_BITTER_AMPLIFY",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_bitter_amplify",
            "expected": "risk_bitter_amplify",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 635,
            "output_tokens": 7,
            "latency_ms": 684.8,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_BITTER_AMPLIFY",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_bitter_amplify",
            "expected": "risk_bitter_amplify",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 635,
            "output_tokens": 7,
            "latency_ms": 504.4,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_BITTER_AMPLIFY",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_bitter_amplify",
            "expected": "risk_bitter_amplify",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 635,
            "output_tokens": 7,
            "latency_ms": 686.5,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 635.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 625.233,
        "usd_sum": 0.000795
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-016",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "easy",
      "cuisine_region": "mexican",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "b2e02272ca87b2d4",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"habanero-heavy Yucatán salsa\",\n  \"beverage\": \"15% abv Zinfandel\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_SPICE_ALCOHOL"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_spice_alcohol",
            "expected": "risk_spice_alcohol",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 634,
            "output_tokens": 7,
            "latency_ms": 839.0,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_spice_alcohol",
            "expected": "risk_spice_alcohol",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 634,
            "output_tokens": 7,
            "latency_ms": 630.4,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_SPICE_ALCOHOL",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_spice_alcohol",
            "expected": "risk_spice_alcohol",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 634,
            "output_tokens": 7,
            "latency_ms": 615.8,
            "usd": 0.000265,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 634.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 695.067,
        "usd_sum": 0.000795
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-017",
      "category": "risk",
      "capability": "risk_severity",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "12372c8de13add55",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nUsing OGS risk severity levels (low/medium/high), classify the risk severity of the described pairing. Respond with just the severity level.\n\nINPUT:\n{\n  \"dish\": \"simple grilled branzino with lemon\",\n  \"beverage\": \"heavily oaked California Chardonnay\",\n  \"risk_code\": \"RISK_OAK_MASKS\"\n}\n\nANSWER FORMAT: one_of:low,medium,high",
      "expected": {
        "value": "medium"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "high",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "high",
            "expected": "medium",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 597,
            "output_tokens": 1,
            "latency_ms": 446.8,
            "usd": 0.00024,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "high",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "high",
            "expected": "medium",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 597,
            "output_tokens": 1,
            "latency_ms": 2389.3,
            "usd": 0.00024,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "high",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "high",
            "expected": "medium",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 597,
            "output_tokens": 1,
            "latency_ms": 452.3,
            "usd": 0.00024,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 597.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 1096.133,
        "usd_sum": 0.00072
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-018",
      "category": "risk",
      "capability": "risk_severity",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "7a97c002792dfad9",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nUsing OGS risk severity levels, classify the severity of a tannic red wine with raw anchovies.\n\nINPUT:\n{\n  \"dish\": \"raw marinated anchovies\",\n  \"beverage\": \"young tannic Cabernet Sauvignon\",\n  \"risk_code\": \"RISK_TANNIN_FISH\"\n}\n\nANSWER FORMAT: one_of:low,medium,high",
      "expected": {
        "value": "high"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "high",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "high",
            "expected": "high",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 587,
            "output_tokens": 1,
            "latency_ms": 891.1,
            "usd": 0.000236,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "high",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "high",
            "expected": "high",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 587,
            "output_tokens": 1,
            "latency_ms": 589.2,
            "usd": 0.000236,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "high",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "high",
            "expected": "high",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 587,
            "output_tokens": 1,
            "latency_ms": 465.4,
            "usd": 0.000236,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 587.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 648.567,
        "usd_sum": 0.000708
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-019",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "german",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "0e3d6c3f9d86af0b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"raw Kumamoto oyster\",\n  \"beverage\": \"off-dry Spätlese Riesling with 45 g/L residual sugar\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_SWEETNESS_CONFLICT"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_sweetness_conflict",
            "expected": "risk_sweetness_conflict",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 639,
            "output_tokens": 8,
            "latency_ms": 558.3,
            "usd": 0.000268,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_sweetness_conflict",
            "expected": "risk_sweetness_conflict",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 639,
            "output_tokens": 8,
            "latency_ms": 655.9,
            "usd": 0.000268,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_SWEETNESS_CONFLICT",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_sweetness_conflict",
            "expected": "risk_sweetness_conflict",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 639,
            "output_tokens": 8,
            "latency_ms": 534.7,
            "usd": 0.000268,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 639.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 582.967,
        "usd_sum": 0.000804
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:risk-020",
      "category": "risk",
      "capability": "risk_identification",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.explanation_codes.negative"
      ],
      "prompt_template_hash": "2dd649d25c606ee7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (negative / risk only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n---\n\nReturn the single most important OGS negative explanation-code (risk) for the pairing below. Respond with just the code ID.\n\nINPUT:\n{\n  \"dish\": \"slow-braised oxtail stew with dark jus\",\n  \"beverage\": \"very pale Provençal rosé, 12.5% abv\"\n}\n\nANSWER FORMAT: one_of:RISK_TANNIN_FISH,RISK_ALCOHOL_OVERPOWERS,RISK_BITTER_AMPLIFY,RISK_OAK_MASKS,RISK_INTENSITY_MISMATCH,RISK_ACID_CLASH,RISK_SWEETNESS_CONFLICT,RISK_SPICE_ALCOHOL",
      "expected": {
        "value": "RISK_INTENSITY_MISMATCH"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "RISK_INTENSITY_MISMATCH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_intensity_mismatch",
            "expected": "risk_intensity_mismatch",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 643,
            "output_tokens": 8,
            "latency_ms": 606.9,
            "usd": 0.00027,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "RISK_INTENSITY_MISMATCH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_intensity_mismatch",
            "expected": "risk_intensity_mismatch",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 643,
            "output_tokens": 8,
            "latency_ms": 615.2,
            "usd": 0.00027,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "RISK_INTENSITY_MISMATCH",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "risk_intensity_mismatch",
            "expected": "risk_intensity_mismatch",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 643,
            "output_tokens": 8,
            "latency_ms": 781.3,
            "usd": 0.00027,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 643.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 667.8,
        "usd_sum": 0.00081
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-001",
      "category": "safety",
      "capability": "cooking_temperature",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "8e37b185cfaed2aa",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nWhat is the minimum core temperature (°C) required to safely cook poultry under the OGS HACCP reference? Respond with a single number.\n\nINPUT:\n{\n  \"product\": \"whole chicken\"\n}\n\nANSWER FORMAT: numeric:integer or decimal °C",
      "expected": {
        "value": 75,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 795,
            "output_tokens": 1,
            "latency_ms": 329.3,
            "usd": 0.00032,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 795,
            "output_tokens": 1,
            "latency_ms": 508.6,
            "usd": 0.00032,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 795,
            "output_tokens": 1,
            "latency_ms": 795.9,
            "usd": 0.00032,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 795.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 544.6,
        "usd_sum": 0.00096
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-002",
      "category": "safety",
      "capability": "cooking_temperature",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "d38d6470a18f7e97",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nA restaurant serves minced-beef smash burgers. What minimum core temperature (°C) should the patties reach? Respond with a single number.\n\nINPUT:\n{\n  \"product\": \"minced beef patty\"\n}\n\nANSWER FORMAT: numeric:integer °C",
      "expected": {
        "value": 75,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 796,
            "output_tokens": 1,
            "latency_ms": 558.2,
            "usd": 0.00032,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 796,
            "output_tokens": 1,
            "latency_ms": 470.0,
            "usd": 0.00032,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 796,
            "output_tokens": 1,
            "latency_ms": 545.6,
            "usd": 0.00032,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 796.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 524.6,
        "usd_sum": 0.00096
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-003",
      "category": "safety",
      "capability": "danger_zone",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "97f1d8e6fc44b9e2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nThe bacterial danger zone is defined between two temperatures in °C. Respond as 'LOW-HIGH' in °C (integers), e.g. '10-50'.\n\nANSWER FORMAT: exact:LOW-HIGH °C",
      "expected": {
        "value": "5-63",
        "aliases": [
          "5 to 63",
          "5-63°C",
          "5 C - 63 C"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:5-63 °C",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:5-63 °c",
            "expected": "5-63",
            "aliases": [
              "5 to 63",
              "5-63°C",
              "5 C - 63 C"
            ]
          },
          "metrics": {
            "input_tokens": 789,
            "output_tokens": 7,
            "latency_ms": 803.8,
            "usd": 0.000327,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:5-63 °C",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:5-63 °c",
            "expected": "5-63",
            "aliases": [
              "5 to 63",
              "5-63°C",
              "5 C - 63 C"
            ]
          },
          "metrics": {
            "input_tokens": 789,
            "output_tokens": 7,
            "latency_ms": 414.3,
            "usd": 0.000327,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:5-63 °C",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:5-63 °c",
            "expected": "5-63",
            "aliases": [
              "5 to 63",
              "5-63°C",
              "5 C - 63 C"
            ]
          },
          "metrics": {
            "input_tokens": 789,
            "output_tokens": 7,
            "latency_ms": 667.8,
            "usd": 0.000327,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 789.0,
        "output_tokens_mean": 7.0,
        "latency_ms_mean": 628.633,
        "usd_sum": 0.000981
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-004",
      "category": "safety",
      "capability": "cooling_rule",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "9e5770b6c7036769",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nUnder UK FSA guidance, hot food must be cooled from 63 °C down to 21 °C within how many hours? Respond with a single integer.\n\nANSWER FORMAT: numeric:integer hours",
      "expected": {
        "value": 2,
        "tolerance": 0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "numeric:2",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 2.0,
            "expected": 2.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 784,
            "output_tokens": 3,
            "latency_ms": 607.1,
            "usd": 0.000318,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "numeric:2",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 2.0,
            "expected": 2.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 784,
            "output_tokens": 3,
            "latency_ms": 587.2,
            "usd": 0.000318,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "numeric:2",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 2.0,
            "expected": 2.0,
            "tolerance": 0.0
          },
          "metrics": {
            "input_tokens": 784,
            "output_tokens": 3,
            "latency_ms": 686.4,
            "usd": 0.000318,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 784.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 626.9,
        "usd_sum": 0.000954
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-005",
      "category": "safety",
      "capability": "reheating",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "cb2ff73059921b59",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nTo what minimum core temperature (°C) must a previously cooked and chilled dish be reheated under OGS HACCP? Respond with a single number.\n\nANSWER FORMAT: numeric:integer °C",
      "expected": {
        "value": 75,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 783,
            "output_tokens": 1,
            "latency_ms": 459.2,
            "usd": 0.000315,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "75°C",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 783,
            "output_tokens": 2,
            "latency_ms": 624.3,
            "usd": 0.000316,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "75",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 75.0,
            "expected": 75.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 783,
            "output_tokens": 1,
            "latency_ms": 472.1,
            "usd": 0.000315,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 783.0,
        "output_tokens_mean": 1.333,
        "latency_ms_mean": 518.533,
        "usd_sum": 0.000946
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-006",
      "category": "safety",
      "capability": "hot_holding",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "b3c9f3a848d65390",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nWhat minimum temperature (°C) must hot food be held at for service? Respond with a single number.\n\nANSWER FORMAT: numeric:integer °C",
      "expected": {
        "value": 63,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "63",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 63.0,
            "expected": 63.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 774,
            "output_tokens": 1,
            "latency_ms": 1152.7,
            "usd": 0.000311,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "63",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 63.0,
            "expected": 63.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 774,
            "output_tokens": 1,
            "latency_ms": 660.3,
            "usd": 0.000311,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "63",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 63.0,
            "expected": 63.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 774,
            "output_tokens": 1,
            "latency_ms": 649.9,
            "usd": 0.000311,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 774.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 820.967,
        "usd_sum": 0.000933
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-007",
      "category": "safety",
      "capability": "cold_chain",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "f120f4a38737e981",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nWhat is the maximum temperature (°C) for frozen storage under OGS HACCP? Respond with a single number (negative).\n\nANSWER FORMAT: numeric:integer °C",
      "expected": {
        "value": -18,
        "tolerance": 1
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "-18",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": -18.0,
            "expected": -18.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 778,
            "output_tokens": 2,
            "latency_ms": 704.9,
            "usd": 0.000314,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "-18",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": -18.0,
            "expected": -18.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 778,
            "output_tokens": 2,
            "latency_ms": 613.1,
            "usd": 0.000314,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "numeric:-18 °C",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": -18.0,
            "expected": -18.0,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 778,
            "output_tokens": 5,
            "latency_ms": 440.1,
            "usd": 0.000319,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 778.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 586.033,
        "usd_sum": 0.000947
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-008",
      "category": "safety",
      "capability": "thawing_method",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "9b8f975bd042c7a0",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nWhich of the following thawing methods is explicitly NOT permitted under OGS HACCP: 'in the fridge at 4 °C', 'under cold running water', 'in the microwave just before cooking', 'at room temperature on the counter'? Respond with the exact method phrase.\n\nINPUT:\n{\n  \"methods\": [\n    \"in the fridge at 4 °C\",\n    \"under cold running water\",\n    \"in the microwave just before cooking\",\n    \"at room temperature on the counter\"\n  ]\n}\n\nANSWER FORMAT: exact:method phrase",
      "expected": {
        "value": "at room temperature on the counter",
        "aliases": [
          "room temperature",
          "on the counter",
          "room-temperature thawing"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:at room temperature on the counter",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:at room temperature on the counter",
            "expected": "at room temperature on the counter",
            "aliases": [
              "room temperature",
              "on the counter",
              "room-temperature thawing"
            ]
          },
          "metrics": {
            "input_tokens": 854,
            "output_tokens": 8,
            "latency_ms": 697.6,
            "usd": 0.000354,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:at room temperature on the counter",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:at room temperature on the counter",
            "expected": "at room temperature on the counter",
            "aliases": [
              "room temperature",
              "on the counter",
              "room-temperature thawing"
            ]
          },
          "metrics": {
            "input_tokens": 854,
            "output_tokens": 8,
            "latency_ms": 556.7,
            "usd": 0.000354,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:at room temperature on the counter",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:at room temperature on the counter",
            "expected": "at room temperature on the counter",
            "aliases": [
              "room temperature",
              "on the counter",
              "room-temperature thawing"
            ]
          },
          "metrics": {
            "input_tokens": 854,
            "output_tokens": 8,
            "latency_ms": 546.7,
            "usd": 0.000354,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 854.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 600.333,
        "usd_sum": 0.001062
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-009",
      "category": "safety",
      "capability": "ccp_identification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "set_f1",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "9f4b49c1c20e4376",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nFor a cook-chill dish of chicken curry with rice, identify the Critical Control Points from the OGS stage list. Return a comma-separated list using the OGS CCP stage IDs.\n\nINPUT:\n{\n  \"dish\": \"cook-chill chicken curry with rice\"\n}\n\nANSWER FORMAT: set:comma-separated CCP stage IDs",
      "expected": {
        "values": [
          "cooking",
          "cooling",
          "storage_cold",
          "reheating",
          "hot_holding"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "set:receiving,storage_cold,prep,cooking,cooling,storage_cold,reheating,service",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "cooking",
              "cooling",
              "prep",
              "reheating",
              "service",
              "set:receiving",
              "storage_cold"
            ],
            "expected": [
              "cooking",
              "cooling",
              "hot_holding",
              "reheating",
              "storage_cold"
            ],
            "precision": 0.5714,
            "recall": 0.8
          },
          "metrics": {
            "input_tokens": 808,
            "output_tokens": 23,
            "latency_ms": 804.8,
            "usd": 0.00036,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "set:receiving,storage_cold,prep,cooking,cooling,storage_cold,reheating,service,cleaning_sanitising",
          "compliant": true,
          "score": 0.6154,
          "detail": {
            "predicted": [
              "cleaning_sanitising",
              "cooking",
              "cooling",
              "prep",
              "reheating",
              "service",
              "set:receiving",
              "storage_cold"
            ],
            "expected": [
              "cooking",
              "cooling",
              "hot_holding",
              "reheating",
              "storage_cold"
            ],
            "precision": 0.5,
            "recall": 0.8
          },
          "metrics": {
            "input_tokens": 808,
            "output_tokens": 29,
            "latency_ms": 836.3,
            "usd": 0.00037,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "set:receiving,storage_cold,thawing,prep,cooking,cooling,storage_cold,service",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": [
              "cooking",
              "cooling",
              "prep",
              "service",
              "set:receiving",
              "storage_cold",
              "thawing"
            ],
            "expected": [
              "cooking",
              "cooling",
              "hot_holding",
              "reheating",
              "storage_cold"
            ],
            "precision": 0.4286,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 808,
            "output_tokens": 24,
            "latency_ms": 606.1,
            "usd": 0.000362,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.594,
      "score_summary": {
        "mean": 0.594,
        "stdev": 0.0854,
        "sem": 0.0493,
        "ci95_lo": 0.4974,
        "ci95_hi": 0.6906,
        "n": 3
      },
      "score_if_valid": 0.594,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 808.0,
        "output_tokens_mean": 25.333,
        "latency_ms_mean": 749.067,
        "usd_sum": 0.001092
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-010",
      "category": "safety",
      "capability": "cooking_temperature",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "eea6814938f18565",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nA bistro serves a rare whole-muscle ribeye (seared exterior, internal 46 °C). Under OGS HACCP, is this permitted? Answer yes or no.\n\nINPUT:\n{\n  \"product\": \"whole-muscle ribeye\",\n  \"core_c\": 46\n}\n\nANSWER FORMAT: exact:yes|no",
      "expected": {
        "value": "yes"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "no",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "no",
            "expected": "yes",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 812,
            "output_tokens": 1,
            "latency_ms": 444.6,
            "usd": 0.000326,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:yes",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:yes",
            "expected": "yes",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 812,
            "output_tokens": 3,
            "latency_ms": 417.5,
            "usd": 0.00033,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "no",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "no",
            "expected": "yes",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 812,
            "output_tokens": 1,
            "latency_ms": 413.0,
            "usd": 0.000326,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 812.0,
        "output_tokens_mean": 1.667,
        "latency_ms_mean": 425.033,
        "usd_sum": 0.000982
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-011",
      "category": "safety",
      "capability": "at_risk_groups",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "edc2981cec11c39e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nA children's hospital must serve hollandaise sauce. Which is the ONE compliant option: 'fresh shell eggs at 63 °C', 'fresh shell eggs at 70 °C', 'pasteurised egg yolk', 'raw shell eggs with lemon juice'? Respond with the exact phrase.\n\nINPUT:\n{\n  \"options\": [\n    \"fresh shell eggs at 63 °C\",\n    \"fresh shell eggs at 70 °C\",\n    \"pasteurised egg yolk\",\n    \"raw shell eggs with lemon juice\"\n  ]\n}\n\nANSWER FORMAT: exact:option phrase",
      "expected": {
        "value": "pasteurised egg yolk",
        "aliases": [
          "pasteurised egg",
          "pasteurized egg yolk",
          "pasteurized egg"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "exact:pasteurised egg yolk",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:pasteurised egg yolk",
            "expected": "pasteurised egg yolk",
            "aliases": [
              "pasteurised egg",
              "pasteurized egg yolk",
              "pasteurized egg"
            ]
          },
          "metrics": {
            "input_tokens": 861,
            "output_tokens": 8,
            "latency_ms": 626.9,
            "usd": 0.000357,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "exact:pasteurised egg yolk",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:pasteurised egg yolk",
            "expected": "pasteurised egg yolk",
            "aliases": [
              "pasteurised egg",
              "pasteurized egg yolk",
              "pasteurized egg"
            ]
          },
          "metrics": {
            "input_tokens": 861,
            "output_tokens": 8,
            "latency_ms": 467.1,
            "usd": 0.000357,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "exact:pasteurised egg yolk",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "exact:pasteurised egg yolk",
            "expected": "pasteurised egg yolk",
            "aliases": [
              "pasteurised egg",
              "pasteurized egg yolk",
              "pasteurized egg"
            ]
          },
          "metrics": {
            "input_tokens": 861,
            "output_tokens": 8,
            "latency_ms": 536.0,
            "usd": 0.000357,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 861.0,
        "output_tokens_mean": 8.0,
        "latency_ms_mean": 543.333,
        "usd_sum": 0.001071
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:safe-012",
      "category": "safety",
      "capability": "numeric_vector",
      "difficulty": "hard",
      "cuisine_region": "global",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "rules.haccp",
        "vocab.haccp_ccp_stages"
      ],
      "prompt_template_hash": "2ba4113ffe26b2fc",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### HACCP temperature and time rules (food-safety reference)\n\nTemperatures are expressed in degrees Celsius.\n\n**Danger zone**: 5 °C to 63 °C — limit total exposure to 4 hours.\n\n**Cold chain**:\n- Chilled storage: 0 to 5 °C\n- Frozen storage: ≤ -18 °C\n- Thawing: in fridge at ≤ 5 °C, or under cold running water, or in   microwave immediately before cooking. 
Never at room temperature.\n\n**Cooking core temperatures (minimum, hold for ≥15 s)**:\n- Poultry, reformed meat (burgers, sausages), stuffed meat: **≥ 75 °C**\n- Minced beef (burgers served medium+ from whole muscle after searing   all external surfaces to 75 °C is acceptable): **≥ 75 °C** as a   default; 70 °C for 2 min or 65 °C for 10 min are equivalent.\n- Whole-muscle beef/lamb (steak/roast) cooked to order: any internal   temperature acceptable if the external surface has been seared.\n- Pork: **≥ 63 °C** held 3 min (USDA) or 75 °C (EU retail default).\n- Fish (non-sushi-grade): **≥ 63 °C** held 15 s.\n- Shell eggs for at-risk groups: **≥ 70 °C** or use pasteurised egg.\n\n**Hot holding**: ≥ 63 °C.\n**Cooling**: 63 °C → 21 °C within 2 hours, then 21 °C → 5 °C within 4 more hours (UK FSA), or 57 °C → 21 °C within 2 hours and 21 °C → 5 °C within 4 hours (FDA).\n**Reheating**: reach **≥ 75 °C** (UK) / 74 °C (US) throughout, within 2 hours. Reheat once only.\n\n**CCP stages**: `receiving`, `storage_cold`, `storage_dry`, `thawing`, `prep`, `cooking`, `hot_holding`, `cooling`, `reheating`, `service`, `cleaning_sanitising`.\n\n### HACCP CCP stages\n\nreceiving, storage_cold, storage_dry, thawing, prep, cooking, hot_holding, cooling, reheating, service, cleaning_sanitising\n\n---\n\nReturn the canonical HACCP reference temperatures in °C for this dish flow (poultry cook-chill operation): [cold_storage_max, poultry_core_cook_min, hot_hold_min, reheat_min, frozen_storage_max]. Respond with a JSON array of five numbers only, in that order.\n\nINPUT:\n{\n  \"flow\": \"poultry cook-chill\"\n}\n\nANSWER FORMAT: json:array of 5 numbers",
      "expected": {
        "values": [
          5,
          75,
          63,
          75,
          -18
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[5, 75, 63, 75, -18]",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 5,
                "expected": 5,
                "score": 1.0
              },
              {
                "index": 1,
                "predicted": 75,
                "expected": 75,
                "score": 1.0
              },
              {
                "index": 2,
                "predicted": 63,
                "expected": 63,
                "score": 1.0
              },
              {
                "index": 3,
                "predicted": 75,
                "expected": 75,
                "score": 1.0
              },
              {
                "index": 4,
                "predicted": -18,
                "expected": -18,
                "score": 1.0
              }
            ],
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 830,
            "output_tokens": 15,
            "latency_ms": 683.4,
            "usd": 0.000356,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[5, 75, 63, 75, -18]",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 5,
                "expected": 5,
                "score": 1.0
              },
              {
                "index": 1,
                "predicted": 75,
                "expected": 75,
                "score": 1.0
              },
              {
                "index": 2,
                "predicted": 63,
                "expected": 63,
                "score": 1.0
              },
              {
                "index": 3,
                "predicted": 75,
                "expected": 75,
                "score": 1.0
              },
              {
                "index": 4,
                "predicted": -18,
                "expected": -18,
                "score": 1.0
              }
            ],
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 830,
            "output_tokens": 15,
            "latency_ms": 772.5,
            "usd": 0.000356,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[5, 75, 63, 75, -18]",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_position": [
              {
                "index": 0,
                "predicted": 5,
                "expected": 5,
                "score": 1.0
              },
              {
                "index": 1,
                "predicted": 75,
                "expected": 75,
                "score": 1.0
              },
              {
                "index": 2,
                "predicted": 63,
                "expected": 63,
                "score": 1.0
              },
              {
                "index": 3,
                "predicted": 75,
                "expected": 75,
                "score": 1.0
              },
              {
                "index": 4,
                "predicted": -18,
                "expected": -18,
                "score": 1.0
              }
            ],
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 830,
            "output_tokens": 15,
            "latency_ms": 10811.4,
            "usd": 0.000356,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 830.0,
        "output_tokens_mean": 15.0,
        "latency_ms_mean": 4089.1,
        "usd_sum": 0.001068
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-001",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "913118356da8150b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the umami intensity of a 36-month aged Parmigiano-Reggiano on the OGS 0-10 intensity scale (0 = not present, 10 = extreme). Respond with only a single decimal number.\n\nINPUT:\n{\n  \"ingredient\": \"Parmigiano-Reggiano 36 months\",\n  \"attribute\": \"umami\"\n}\n\nANSWER FORMAT: integer_0_10",
      "expected": {
        "value": 8.5,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "8.7",
          "compliant": true,
          "score": 0.8667,
          "detail": {
            "predicted": 8.7,
            "expected": 8.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 379,
            "output_tokens": 3,
            "latency_ms": 438.5,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "9.0",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 9.0,
            "expected": 8.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 379,
            "output_tokens": 3,
            "latency_ms": 457.3,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "8.5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 8.5,
            "expected": 8.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 379,
            "output_tokens": 3,
            "latency_ms": 430.0,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8445,
      "score_summary": {
        "mean": 0.8445,
        "stdev": 0.1678,
        "sem": 0.0969,
        "ci95_lo": 0.6546,
        "ci95_hi": 1.0343,
        "n": 3
      },
      "score_if_valid": 0.8445,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 379.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 441.933,
        "usd_sum": 0.000468
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-002",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "2690028d89b60292",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the sour intensity of undiluted fresh lemon juice on the OGS 0-10 intensity scale. Respond with only a single decimal number.\n\nINPUT:\n{\n  \"ingredient\": \"fresh lemon juice (undiluted)\",\n  \"attribute\": \"sour\"\n}\n\nANSWER FORMAT: integer_0_10",
      "expected": {
        "value": 9.5,
        "tolerance": 1.0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "9.5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 9.5,
            "expected": 9.5,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 361,
            "output_tokens": 3,
            "latency_ms": 470.2,
            "usd": 0.000149,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "9.0",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": 9.0,
            "expected": 9.5,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 361,
            "output_tokens": 3,
            "latency_ms": 534.1,
            "usd": 0.000149,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "9.0",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": 9.0,
            "expected": 9.5,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 361,
            "output_tokens": 3,
            "latency_ms": 475.9,
            "usd": 0.000149,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.2887,
        "sem": 0.1667,
        "ci95_lo": 0.34,
        "ci95_hi": 0.9933,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 361.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 493.4,
        "usd_sum": 0.000447
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-003",
      "category": "sensory",
      "capability": "structural_estimation",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "b09502eb4fd5d61f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nEstimate the tannin intensity of a young (under 5 years old) Barolo on the OGS 0-10 intensity scale. Respond with only a single decimal number.\n\nINPUT:\n{\n  \"beverage\": \"young Barolo, <5 years\",\n  \"attribute\": \"tannin\"\n}\n\nANSWER FORMAT: integer_0_10",
      "expected": {
        "value": 8.5,
        "tolerance": 1.0
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "8.5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 8.5,
            "expected": 8.5,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 715.0,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 7.5,
            "expected": 8.5,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 577.3,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 7.5,
            "expected": 8.5,
            "tolerance": 1.0
          },
          "metrics": {
            "input_tokens": 349,
            "output_tokens": 3,
            "latency_ms": 619.8,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 349.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 637.367,
        "usd_sum": 0.000432
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-004",
      "category": "sensory",
      "capability": "basic_taste_vector",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "94f009713056d52f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nReturn the five basic tastes for a ripe, in-season strawberry, each on the OGS 0-10 intensity scale. 
Respond as a single JSON object with keys sweet, sour, salty, bitter, umami.\n\nINPUT:\n{\n  \"ingredient\": \"ripe, in-season strawberry\"\n}\n\nANSWER FORMAT: json\n\nRESPONSE JSON SCHEMA:\n{\n  \"type\": \"object\",\n  \"required\": [\n    \"sweet\",\n    \"sour\",\n    \"salty\",\n    \"bitter\",\n    \"umami\"\n  ],\n  \"properties\": {\n    \"sweet\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"sour\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"salty\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"bitter\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"umami\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    }\n  },\n  \"additionalProperties\": false\n}",
      "expected": {
        "values": {
          "sweet": 7.0,
          "sour": 4.0,
          "salty": 0.5,
          "bitter": 1.0,
          "umami": 0.5
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"sweet\": 7.5,\n  \"sour\": 5.0,\n  \"salty\": 0.1,\n  \"bitter\": 0.3,\n  \"umami\": 0.0\n}",
          "compliant": true,
          "score": 0.69,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 7.5,
                "expected": 7.0,
                "score": 0.75
              },
              "sour": {
                "predicted": 5.0,
                "expected": 4.0,
                "score": 0.5
              },
              "salty": {
                "predicted": 0.1,
                "expected": 0.5,
                "score": 0.8
              },
              "bitter": {
                "predicted": 0.3,
                "expected": 1.0,
                "score": 0.65
              },
              "umami": {
                "predicted": 0.0,
                "expected": 0.5,
                "score": 0.75
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 567,
            "output_tokens": 51,
            "latency_ms": 1022.8,
            "usd": 0.000308,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"sweet\": 7.5,\n  \"sour\": 4.0,\n  \"salty\": 0.1,\n  \"bitter\": 0.5,\n  \"umami\": 0.1\n}",
          "compliant": true,
          "score": 0.82,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 7.5,
                "expected": 7.0,
                "score": 0.75
              },
              "sour": {
                "predicted": 4.0,
                "expected": 4.0,
                "score": 1.0
              },
              "salty": {
                "predicted": 0.1,
                "expected": 0.5,
                "score": 0.8
              },
              "bitter": {
                "predicted": 0.5,
                "expected": 1.0,
                "score": 0.75
              },
              "umami": {
                "predicted": 0.1,
                "expected": 0.5,
                "score": 0.8
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 567,
            "output_tokens": 51,
            "latency_ms": 784.4,
            "usd": 0.000308,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"sweet\": 7,\n  \"sour\": 5,\n  \"salty\": 0,\n  \"bitter\": 1,\n  \"umami\": 0\n}",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 7,
                "expected": 7.0,
                "score": 1.0
              },
              "sour": {
                "predicted": 5,
                "expected": 4.0,
                "score": 0.5
              },
              "salty": {
                "predicted": 0,
                "expected": 0.5,
                "score": 0.75
              },
              "bitter": {
                "predicted": 1,
                "expected": 1.0,
                "score": 1.0
              },
              "umami": {
                "predicted": 0,
                "expected": 0.5,
                "score": 0.75
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 567,
            "output_tokens": 41,
            "latency_ms": 842.8,
            "usd": 0.000292,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.77,
      "score_summary": {
        "mean": 0.77,
        "stdev": 0.07,
        "sem": 0.0404,
        "ci95_lo": 0.6908,
        "ci95_hi": 0.8492,
        "n": 3
      },
      "score_if_valid": 0.77,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 567.0,
        "output_tokens_mean": 47.667,
        "latency_ms_mean": 883.333,
        "usd_sum": 0.000908
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-005",
      "category": "sensory",
      "capability": "structural_estimation",
      "difficulty": "medium",
      "cuisine_region": "american",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "c950ec9851ad52bf",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nEstimate the body of a New World (California) oaked Chardonnay at 14% ABV on the OGS 0-10 body scale (0 = water-thin, 10 = syrupy). Respond with only a single decimal number.\n\nINPUT:\n{\n  \"beverage\": \"oaked California Chardonnay, 14% ABV\",\n  \"attribute\": \"body\"\n}\n\nANSWER FORMAT: integer_0_10",
      "expected": {
        "value": 7.0,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "7.0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 7.0,
            "expected": 7.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 365,
            "output_tokens": 3,
            "latency_ms": 548.2,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 7.5,
            "expected": 7.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 365,
            "output_tokens": 3,
            "latency_ms": 629.7,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "7.0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 7.0,
            "expected": 7.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 365,
            "output_tokens": 3,
            "latency_ms": 495.4,
            "usd": 0.000151,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8889,
      "score_summary": {
        "mean": 0.8889,
        "stdev": 0.1924,
        "sem": 0.1111,
        "ci95_lo": 0.6711,
        "ci95_hi": 1.1067,
        "n": 3
      },
      "score_if_valid": 0.8889,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 365.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 557.767,
        "usd_sum": 0.000453
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-006",
      "category": "sensory",
      "capability": "dominant_aromatic_family",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "d68ef8562addd55d",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nIdentify the single dominant aromatic FAMILY (from the OGS vocabulary) of a classic Marlborough Sauvignon Blanc. Respond with just the family name.\n\nINPUT:\n{\n  \"beverage\": \"Marlborough Sauvignon Blanc\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "vegetal"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "fruit",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "fruit",
            "expected": "vegetal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 330,
            "output_tokens": 1,
            "latency_ms": 460.4,
            "usd": 0.000134,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "fruit",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "fruit",
            "expected": "vegetal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 330,
            "output_tokens": 1,
            "latency_ms": 803.1,
            "usd": 0.000134,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "fruit",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "fruit",
            "expected": "vegetal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 330,
            "output_tokens": 1,
            "latency_ms": 591.8,
            "usd": 0.000134,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 330.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 618.433,
        "usd_sum": 0.000402
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-007",
      "category": "sensory",
      "capability": "structural_vector",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "658c14cce434cc0a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nReturn the OGS structural attributes of a Brut Champagne on the 0-10 scale. 
Respond as a JSON object with keys body, acidity_perception, tannin, astringency, alcohol_heat, effervescence.\n\nINPUT:\n{\n  \"beverage\": \"Brut non-vintage Champagne\"\n}\n\nANSWER FORMAT: json\n\nRESPONSE JSON SCHEMA:\n{\n  \"type\": \"object\",\n  \"required\": [\n    \"body\",\n    \"acidity_perception\",\n    \"tannin\",\n    \"astringency\",\n    \"alcohol_heat\",\n    \"effervescence\"\n  ],\n  \"properties\": {\n    \"body\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"acidity_perception\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"tannin\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"astringency\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"alcohol_heat\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"effervescence\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    }\n  },\n  \"additionalProperties\": false\n}",
      "expected": {
        "values": {
          "body": 4.5,
          "acidity_perception": 8.0,
          "tannin": 0.5,
          "astringency": 1.0,
          "alcohol_heat": 3.5,
          "effervescence": 9.0
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"body\": 3,\n  \"acidity_perception\": 7,\n  \"tannin\": 0,\n  \"astringency\": 0,\n  \"alcohol_heat\": 3,\n  \"effervescence\": 8\n}",
          "compliant": true,
          "score": 0.5417,
          "detail": {
            "per_key": {
              "body": {
                "predicted": 3,
                "expected": 4.5,
                "score": 0.25
              },
              "acidity_perception": {
                "predicted": 7,
                "expected": 8.0,
                "score": 0.5
              },
              "tannin": {
                "predicted": 0,
                "expected": 0.5,
                "score": 0.75
              },
              "astringency": {
                "predicted": 0,
                "expected": 1.0,
                "score": 0.5
              },
              "alcohol_heat": {
                "predicted": 3,
                "expected": 3.5,
                "score": 0.75
              },
              "effervescence": {
                "predicted": 8,
                "expected": 9.0,
                "score": 0.5
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 594,
            "output_tokens": 54,
            "latency_ms": 5356.0,
            "usd": 0.000324,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"body\": 3,\n  \"acidity_perception\": 7,\n  \"tannin\": 1,\n  \"astringency\": 2,\n  \"alcohol_heat\": 3,\n  \"effervescence\": 8\n}",
          "compliant": true,
          "score": 0.5417,
          "detail": {
            "per_key": {
              "body": {
                "predicted": 3,
                "expected": 4.5,
                "score": 0.25
              },
              "acidity_perception": {
                "predicted": 7,
                "expected": 8.0,
                "score": 0.5
              },
              "tannin": {
                "predicted": 1,
                "expected": 0.5,
                "score": 0.75
              },
              "astringency": {
                "predicted": 2,
                "expected": 1.0,
                "score": 0.5
              },
              "alcohol_heat": {
                "predicted": 3,
                "expected": 3.5,
                "score": 0.75
              },
              "effervescence": {
                "predicted": 8,
                "expected": 9.0,
                "score": 0.5
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 594,
            "output_tokens": 54,
            "latency_ms": 1203.5,
            "usd": 0.000324,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"body\": 3,\n  \"acidity_perception\": 7,\n  \"tannin\": 1,\n  \"astringency\": 1,\n  \"alcohol_heat\": 3,\n  \"effervescence\": 8\n}",
          "compliant": true,
          "score": 0.625,
          "detail": {
            "per_key": {
              "body": {
                "predicted": 3,
                "expected": 4.5,
                "score": 0.25
              },
              "acidity_perception": {
                "predicted": 7,
                "expected": 8.0,
                "score": 0.5
              },
              "tannin": {
                "predicted": 1,
                "expected": 0.5,
                "score": 0.75
              },
              "astringency": {
                "predicted": 1,
                "expected": 1.0,
                "score": 1.0
              },
              "alcohol_heat": {
                "predicted": 3,
                "expected": 3.5,
                "score": 0.75
              },
              "effervescence": {
                "predicted": 8,
                "expected": 9.0,
                "score": 0.5
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 594,
            "output_tokens": 54,
            "latency_ms": 1821.1,
            "usd": 0.000324,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.5695,
      "score_summary": {
        "mean": 0.5695,
        "stdev": 0.0481,
        "sem": 0.0278,
        "ci95_lo": 0.515,
        "ci95_hi": 0.6239,
        "n": 3
      },
      "score_if_valid": 0.5695,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 594.0,
        "output_tokens_mean": 54.0,
        "latency_ms_mean": 2793.533,
        "usd_sum": 0.000972
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-008",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "873c6c2f9608e43f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the bitter intensity of raw radicchio on the OGS 0-10 intensity scale. Respond with only a single decimal number.\n\nINPUT:\n{\n  \"ingredient\": \"raw radicchio\",\n  \"attribute\": \"bitter\"\n}\n\nANSWER FORMAT: integer_0_10",
      "expected": {
        "value": 6.5,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "6.5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 6.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 356,
            "output_tokens": 3,
            "latency_ms": 477.5,
            "usd": 0.000147,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "7.0",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 7.0,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 356,
            "output_tokens": 3,
            "latency_ms": 700.4,
            "usd": 0.000147,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 7.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 356,
            "output_tokens": 3,
            "latency_ms": 655.9,
            "usd": 0.000147,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.3334,
        "sem": 0.1925,
        "ci95_lo": 0.2894,
        "ci95_hi": 1.0439,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 356.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 611.267,
        "usd_sum": 0.000441
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-009",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "523e04f39252cb4b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the umami intensity of a plain 10-minute cold-steep kombu dashi on the OGS 0-10 scale. Respond with only a single decimal number.\n\nINPUT:\n{\n  \"ingredient\": \"kombu dashi (cold steep 10 min, 10 g/L Hokkaido kombu)\",\n  \"attribute\": \"umami\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 6.5,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "5.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 5.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 380,
            "output_tokens": 3,
            "latency_ms": 476.3,
            "usd": 0.000157,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "5.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 5.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 380,
            "output_tokens": 3,
            "latency_ms": 531.3,
            "usd": 0.000157,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "4.5",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": 4.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 380,
            "output_tokens": 3,
            "latency_ms": 621.3,
            "usd": 0.000157,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.2222,
      "score_summary": {
        "mean": 0.2222,
        "stdev": 0.1924,
        "sem": 0.1111,
        "ci95_lo": 0.0044,
        "ci95_hi": 0.44,
        "n": 3
      },
      "score_if_valid": 0.2222,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 380.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 542.967,
        "usd_sum": 0.000471
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-010",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "1813cae53ba17a84",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the bitter intensity of a square of unsweetened 85% cacao dark chocolate on the OGS 0-10 scale.\n\nINPUT:\n{\n  \"ingredient\": \"single-origin 85% dark chocolate\",\n  \"attribute\": \"bitter\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 6.5,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 7.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 358,
            "output_tokens": 3,
            "latency_ms": 394.9,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 7.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 358,
            "output_tokens": 3,
            "latency_ms": 477.9,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "7.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 7.5,
            "expected": 6.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 358,
            "output_tokens": 3,
            "latency_ms": 369.3,
            "usd": 0.000148,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.3333,
        "ci95_hi": 0.3333,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 358.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 414.033,
        "usd_sum": 0.000444
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-011",
      "category": "sensory",
      "capability": "structural_estimation",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "1886b2e009716d1d",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nEstimate the perceived acidity of freshly squeezed yuzu juice on the OGS 0-10 scale.\n\nINPUT:\n{\n  \"ingredient\": \"fresh yuzu juice\",\n  \"attribute\": \"acidity_perception\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 9.0,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "8.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 8.5,
            "expected": 9.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 331,
            "output_tokens": 3,
            "latency_ms": 452.7,
            "usd": 0.000137,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "9.0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 9.0,
            "expected": 9.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 331,
            "output_tokens": 3,
            "latency_ms": 601.3,
            "usd": 0.000137,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "8.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 8.5,
            "expected": 9.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 331,
            "output_tokens": 3,
            "latency_ms": 432.4,
            "usd": 0.000137,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.7778,
      "score_summary": {
        "mean": 0.7778,
        "stdev": 0.1924,
        "sem": 0.1111,
        "ci95_lo": 0.56,
        "ci95_hi": 0.9956,
        "n": 3
      },
      "score_if_valid": 0.7778,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 331.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 495.467,
        "usd_sum": 0.000411
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-012",
      "category": "sensory",
      "capability": "basic_taste_vector",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "8d40a83f1b501685",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nReturn the five basic tastes of fresh-squeezed Valencia orange juice on the OGS 0-10 scale as a JSON object with keys sweet, sour, salty, bitter, umami. Respond with ONLY the JSON.\n\nINPUT:\n{\n  \"ingredient\": \"freshly squeezed Valencia orange juice\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "values": {
          "sweet": 7.0,
          "sour": 5.5,
          "salty": 0.0,
          "bitter": 0.5,
          "umami": 0.0
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\"sweet\":7, \"sour\":6, \"salty\":0.5, \"bitter\":1, \"umami\":0}",
          "compliant": true,
          "score": 0.85,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 7,
                "expected": 7.0,
                "score": 1.0
              },
              "sour": {
                "predicted": 6,
                "expected": 5.5,
                "score": 0.75
              },
              "salty": {
                "predicted": 0.5,
                "expected": 0.0,
                "score": 0.75
              },
              "bitter": {
                "predicted": 1,
                "expected": 0.5,
                "score": 0.75
              },
              "umami": {
                "predicted": 0,
                "expected": 0.0,
                "score": 1.0
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 364,
            "output_tokens": 31,
            "latency_ms": 792.6,
            "usd": 0.000195,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"sweet\": 6.5,\n  \"sour\": 5.5,\n  \"salty\": 0.2,\n  \"bitter\": 1.0,\n  \"umami\": 0.0\n}",
          "compliant": true,
          "score": 0.88,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 6.5,
                "expected": 7.0,
                "score": 0.75
              },
              "sour": {
                "predicted": 5.5,
                "expected": 5.5,
                "score": 1.0
              },
              "salty": {
                "predicted": 0.2,
                "expected": 0.0,
                "score": 0.9
              },
              "bitter": {
                "predicted": 1.0,
                "expected": 0.5,
                "score": 0.75
              },
              "umami": {
                "predicted": 0.0,
                "expected": 0.0,
                "score": 1.0
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 364,
            "output_tokens": 51,
            "latency_ms": 983.2,
            "usd": 0.000227,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"sweet\": 6.5,\n  \"sour\": 5.5,\n  \"salty\": 0,\n  \"bitter\": 1.0,\n  \"umami\": 0\n}",
          "compliant": true,
          "score": 0.9,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 6.5,
                "expected": 7.0,
                "score": 0.75
              },
              "sour": {
                "predicted": 5.5,
                "expected": 5.5,
                "score": 1.0
              },
              "salty": {
                "predicted": 0,
                "expected": 0.0,
                "score": 1.0
              },
              "bitter": {
                "predicted": 1.0,
                "expected": 0.5,
                "score": 0.75
              },
              "umami": {
                "predicted": 0,
                "expected": 0.0,
                "score": 1.0
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 364,
            "output_tokens": 47,
            "latency_ms": 896.4,
            "usd": 0.000221,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8767,
      "score_summary": {
        "mean": 0.8767,
        "stdev": 0.0252,
        "sem": 0.0145,
        "ci95_lo": 0.8482,
        "ci95_hi": 0.9051,
        "n": 3
      },
      "score_if_valid": 0.8767,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 364.0,
        "output_tokens_mean": 43.0,
        "latency_ms_mean": 890.733,
        "usd_sum": 0.000643
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-013",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "medium",
      "cuisine_region": "middle_eastern",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "691f59088f3e98f3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the sweet intensity of a fresh Medjool date on the OGS 0-10 scale.\n\nINPUT:\n{\n  \"ingredient\": \"Medjool date, soft and ripe\",\n  \"attribute\": \"sweet\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 9.0,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "9.0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 9.0,
            "expected": 9.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 352,
            "output_tokens": 3,
            "latency_ms": 539.0,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "9.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 9.5,
            "expected": 9.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 352,
            "output_tokens": 3,
            "latency_ms": 1226.2,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "9.0",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 9.0,
            "expected": 9.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 352,
            "output_tokens": 3,
            "latency_ms": 443.5,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8889,
      "score_summary": {
        "mean": 0.8889,
        "stdev": 0.1924,
        "sem": 0.1111,
        "ci95_lo": 0.6711,
        "ci95_hi": 1.1067,
        "n": 3
      },
      "score_if_valid": 0.8889,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 352.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 736.233,
        "usd_sum": 0.000438
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-014",
      "category": "sensory",
      "capability": "dominant_aromatic_family",
      "difficulty": "medium",
      "cuisine_region": "scottish",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "c4a83a79265c0e9f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nIdentify the single dominant aromatic FAMILY (OGS vocabulary) of a heavily-peated Islay single malt (e.g. Ardbeg 10).\n\nINPUT:\n{\n  \"item\": \"heavily peated Islay single malt whisky, ~50 ppm phenols\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "wood"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "earth",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "earth",
            "expected": "wood",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 342,
            "output_tokens": 1,
            "latency_ms": 460.7,
            "usd": 0.000138,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "earth",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "earth",
            "expected": "wood",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 342,
            "output_tokens": 1,
            "latency_ms": 617.5,
            "usd": 0.000138,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "earth",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "earth",
            "expected": "wood",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 342,
            "output_tokens": 1,
            "latency_ms": 447.3,
            "usd": 0.000138,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 342.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 508.5,
        "usd_sum": 0.000414
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-015",
      "category": "sensory",
      "capability": "structural_vector",
      "difficulty": "hard",
      "cuisine_region": "italian",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "66d64ca2254c09ee",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nReturn the OGS structural attributes of a 5-year-old Amarone della Valpolicella on the 0-10 scale. Respond with a JSON object with keys body, acidity_perception, tannin, astringency, alcohol_heat, effervescence. JSON only.\n\nINPUT:\n{\n  \"item\": \"Amarone della Valpolicella Classico, 5 years old, 16% abv\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "values": {
          "body": 9.0,
          "acidity_perception": 6.0,
          "tannin": 7.0,
          "astringency": 5.5,
          "alcohol_heat": 8.5,
          "effervescence": 0.0
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"body\": 8,\n  \"acidity_perception\": 5,\n  \"tannin\": 7,\n  \"astringency\": 6,\n  \"alcohol_heat\": 7,\n  \"effervescence\": 0\n}",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "per_key": {
              "body": {
                "predicted": 8,
                "expected": 9.0,
                "score": 0.5
              },
              "acidity_perception": {
                "predicted": 5,
                "expected": 6.0,
                "score": 0.5
              },
              "tannin": {
                "predicted": 7,
                "expected": 7.0,
                "score": 1.0
              },
              "astringency": {
                "predicted": 6,
                "expected": 5.5,
                "score": 0.75
              },
              "alcohol_heat": {
                "predicted": 7,
                "expected": 8.5,
                "score": 0.25
              },
              "effervescence": {
                "predicted": 0,
                "expected": 0.0,
                "score": 1.0
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 54,
            "latency_ms": 868.0,
            "usd": 0.000235,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"body\": 8,\n  \"acidity_perception\": 5,\n  \"tannin\": 7,\n  \"astringency\": 6,\n  \"alcohol_heat\": 7,\n  \"effervescence\": 0\n}",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "per_key": {
              "body": {
                "predicted": 8,
                "expected": 9.0,
                "score": 0.5
              },
              "acidity_perception": {
                "predicted": 5,
                "expected": 6.0,
                "score": 0.5
              },
              "tannin": {
                "predicted": 7,
                "expected": 7.0,
                "score": 1.0
              },
              "astringency": {
                "predicted": 6,
                "expected": 5.5,
                "score": 0.75
              },
              "alcohol_heat": {
                "predicted": 7,
                "expected": 8.5,
                "score": 0.25
              },
              "effervescence": {
                "predicted": 0,
                "expected": 0.0,
                "score": 1.0
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 54,
            "latency_ms": 1219.5,
            "usd": 0.000235,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"body\": 8,\n  \"acidity_perception\": 4,\n  \"tannin\": 7,\n  \"astringency\": 5,\n  \"alcohol_heat\": 6,\n  \"effervescence\": 0\n}",
          "compliant": true,
          "score": 0.5417,
          "detail": {
            "per_key": {
              "body": {
                "predicted": 8,
                "expected": 9.0,
                "score": 0.5
              },
              "acidity_perception": {
                "predicted": 4,
                "expected": 6.0,
                "score": 0.0
              },
              "tannin": {
                "predicted": 7,
                "expected": 7.0,
                "score": 1.0
              },
              "astringency": {
                "predicted": 5,
                "expected": 5.5,
                "score": 0.75
              },
              "alcohol_heat": {
                "predicted": 6,
                "expected": 8.5,
                "score": 0.0
              },
              "effervescence": {
                "predicted": 0,
                "expected": 0.0,
                "score": 1.0
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 371,
            "output_tokens": 54,
            "latency_ms": 1061.2,
            "usd": 0.000235,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.625,
      "score_summary": {
        "mean": 0.625,
        "stdev": 0.0722,
        "sem": 0.0417,
        "ci95_lo": 0.5434,
        "ci95_hi": 0.7067,
        "n": 3
      },
      "score_if_valid": 0.625,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 371.0,
        "output_tokens_mean": 54.0,
        "latency_ms_mean": 1049.567,
        "usd_sum": 0.000705
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-016",
      "category": "sensory",
      "capability": "basic_taste_estimation",
      "difficulty": "medium",
      "cuisine_region": "spanish",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "70318c6fd3b033d2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nEstimate the umami intensity of 24-month acorn-finished jamón ibérico de bellota on the OGS 0-10 scale.\n\nINPUT:\n{\n  \"ingredient\": \"jamón ibérico de bellota, 24 months, acorn-finished\",\n  \"attribute\": \"umami\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 8.0,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "8.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 8.5,
            "expected": 8.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 370,
            "output_tokens": 3,
            "latency_ms": 662.1,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "8.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 8.5,
            "expected": 8.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 370,
            "output_tokens": 3,
            "latency_ms": 432.6,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "8.5",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 8.5,
            "expected": 8.0,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 370,
            "output_tokens": 3,
            "latency_ms": 605.6,
            "usd": 0.000153,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 370.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 566.767,
        "usd_sum": 0.000459
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-017",
      "category": "sensory",
      "capability": "dominant_aromatic_family",
      "difficulty": "medium",
      "cuisine_region": "thai",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "c3cedde89152ce76",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nIdentify the single dominant aromatic FAMILY of fresh Thai basil (horapa) leaves.\n\nINPUT:\n{\n  \"item\": \"fresh Thai basil (horapa), whole leaves\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "herbal"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "herbal",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herbal",
            "expected": "herbal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 321,
            "output_tokens": 2,
            "latency_ms": 574.9,
            "usd": 0.000132,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "herbal",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herbal",
            "expected": "herbal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 321,
            "output_tokens": 2,
            "latency_ms": 546.1,
            "usd": 0.000132,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "herbal",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herbal",
            "expected": "herbal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 321,
            "output_tokens": 2,
            "latency_ms": 371.1,
            "usd": 0.000132,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 321.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 497.367,
        "usd_sum": 0.000396
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-018",
      "category": "sensory",
      "capability": "structural_estimation",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "7d31de11173a7fd2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nEstimate the body of a dry Provençal rosé (e.g. Côtes de Provence Grenache-led) on the OGS 0-10 scale.\n\nINPUT:\n{\n  \"item\": \"dry Provençal rosé, 12.5% abv, minimal skin contact\",\n  \"attribute\": \"body\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 3.5,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "3.5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 3.5,
            "expected": 3.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 353,
            "output_tokens": 3,
            "latency_ms": 537.7,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "3.0",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": 3.0,
            "expected": 3.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 353,
            "output_tokens": 3,
            "latency_ms": 484.2,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "3.5",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": 3.5,
            "expected": 3.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 353,
            "output_tokens": 3,
            "latency_ms": 425.6,
            "usd": 0.000146,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8889,
      "score_summary": {
        "mean": 0.8889,
        "stdev": 0.1924,
        "sem": 0.1111,
        "ci95_lo": 0.6711,
        "ci95_hi": 1.1067,
        "n": 3
      },
      "score_if_valid": 0.8889,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 353.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 482.5,
        "usd_sum": 0.000438
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-019",
      "category": "sensory",
      "capability": "basic_taste_vector",
      "difficulty": "hard",
      "cuisine_region": "korean",
      "scorer": "numeric_vector_mae",
      "context_needs": [
        "scales.basic_tastes"
      ],
      "prompt_template_hash": "2e7df899a0148ebd",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n---\n\nReturn the five basic tastes of well-fermented napa kimchi (6 weeks at 4°C after 2 days at room temp) as a JSON object.\n\nINPUT:\n{\n  \"ingredient\": \"napa kimchi, 6 weeks fermented, ripe\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "values": {
          "sweet": 1.0,
          "sour": 7.0,
          "salty": 6.0,
          "bitter": 1.5,
          "umami": 7.0
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"sweet\": 4.5,\n  \"sour\": 7.5,\n  \"salty\": 6.0,\n  \"bitter\": 1.5,\n  \"umami\": 6.5\n}",
          "compliant": true,
          "score": 0.7,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 4.5,
                "expected": 1.0,
                "score": 0.0
              },
              "sour": {
                "predicted": 7.5,
                "expected": 7.0,
                "score": 0.75
              },
              "salty": {
                "predicted": 6.0,
                "expected": 6.0,
                "score": 1.0
              },
              "bitter": {
                "predicted": 1.5,
                "expected": 1.5,
                "score": 1.0
              },
              "umami": {
                "predicted": 6.5,
                "expected": 7.0,
                "score": 0.75
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 355,
            "output_tokens": 51,
            "latency_ms": 1277.1,
            "usd": 0.000224,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"sweet\": 4.5,\n  \"sour\": 7.5,\n  \"salty\": 6.0,\n  \"bitter\": 1.5,\n  \"umami\": 5.5\n}",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 4.5,
                "expected": 1.0,
                "score": 0.0
              },
              "sour": {
                "predicted": 7.5,
                "expected": 7.0,
                "score": 0.75
              },
              "salty": {
                "predicted": 6.0,
                "expected": 6.0,
                "score": 1.0
              },
              "bitter": {
                "predicted": 1.5,
                "expected": 1.5,
                "score": 1.0
              },
              "umami": {
                "predicted": 5.5,
                "expected": 7.0,
                "score": 0.25
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 355,
            "output_tokens": 51,
            "latency_ms": 891.4,
            "usd": 0.000224,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"sweet\": 4.5,\n  \"sour\": 7.5,\n  \"salty\": 6.0,\n  \"bitter\": 1.5,\n  \"umami\": 6.5\n}",
          "compliant": true,
          "score": 0.7,
          "detail": {
            "per_key": {
              "sweet": {
                "predicted": 4.5,
                "expected": 1.0,
                "score": 0.0
              },
              "sour": {
                "predicted": 7.5,
                "expected": 7.0,
                "score": 0.75
              },
              "salty": {
                "predicted": 6.0,
                "expected": 6.0,
                "score": 1.0
              },
              "bitter": {
                "predicted": 1.5,
                "expected": 1.5,
                "score": 1.0
              },
              "umami": {
                "predicted": 6.5,
                "expected": 7.0,
                "score": 0.75
              }
            },
            "tolerance": 2.0
          },
          "metrics": {
            "input_tokens": 355,
            "output_tokens": 51,
            "latency_ms": 1106.1,
            "usd": 0.000224,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0577,
        "sem": 0.0333,
        "ci95_lo": 0.6013,
        "ci95_hi": 0.732,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 355.0,
        "output_tokens_mean": 51.0,
        "latency_ms_mean": 1091.533,
        "usd_sum": 0.000672
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:sense-020",
      "category": "sensory",
      "capability": "structural_estimation",
      "difficulty": "medium",
      "cuisine_region": "portuguese",
      "scorer": "numeric_mae",
      "context_needs": [
        "scales.structural"
      ],
      "prompt_template_hash": "ca5b9ab85fe71acb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n---\n\nEstimate the effervescence of a typical young Vinho Verde (traditional style with slight spritz) on the OGS 0-10 scale.\n\nINPUT:\n{\n  \"item\": \"young Vinho Verde, slightly spritzig\",\n  \"attribute\": \"effervescence\"\n}\n\nANSWER FORMAT: decimal_0_10",
      "expected": {
        "value": 2.5,
        "tolerance": 1.5
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "3.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 3.5,
            "expected": 2.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 345,
            "output_tokens": 3,
            "latency_ms": 397.7,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "3.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 3.5,
            "expected": 2.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 345,
            "output_tokens": 3,
            "latency_ms": 461.2,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "3.5",
          "compliant": true,
          "score": 0.3333,
          "detail": {
            "predicted": 3.5,
            "expected": 2.5,
            "tolerance": 1.5
          },
          "metrics": {
            "input_tokens": 345,
            "output_tokens": 3,
            "latency_ms": 459.3,
            "usd": 0.000143,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.3333,
        "ci95_hi": 0.3333,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 345.0,
        "output_tokens_mean": 3.0,
        "latency_ms_mean": 439.4,
        "usd_sum": 0.000429
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-001",
      "category": "structured_output",
      "capability": "schema_valid_ingredient",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.ingredient",
        "vocab.ingredient_categories",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "4523d68b7da194f7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS ingredient schema (`schema/ingredient.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/ingredient.schema.json\",\n  \"title\": \"OGS Ingredient\",\n  \"description\": \"An ingredient entity with sensory profile, category, and provenance metadata.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"category\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\",\n      \"description\": \"OGS identifier in the format ogs:<namespace>:ingredient:<id>\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\",\n      \"description\": \"OGS specification version (0.1.x or 0.2.x)\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\",\n      \"description\": \"Authoritative / endonym language of this entity's human-readable fields.\"\n    
},\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      },\n      \"description\": \"Advisory list of languages covered by this entity's LString fields.\"\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"category\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"protein\",\n        \"vegetable\",\n        \"fruit\",\n        \"grain\",\n        \"dairy\",\n        \"fat\",\n        \"herb\",\n        \"spice\",\n        \"fungus\",\n        \"legume\",\n        \"nut\",\n        \"seed\",\n        \"sweetener\",\n        \"acid\",\n        \"salt\",\n        \"other\"\n      ]\n    },\n    \"subcategory\": {\n      \"type\": \"string\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"origin\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"region\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"season\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"terroir_notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic 
tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document that describes 'fleur de sel' as an OGS ingredient. It MUST validate against the OGS ingredient schema (schema/ingredient.schema.json). Respond with ONLY the JSON object, no markdown fences.\n\nINPUT:\n{\n  \"ingredient_name\": \"Fleur de sel\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"fleur-de-sel\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/ingredient.schema.json",
        "required_fields": {
          "id": "ogs:core:ingredient:fleur-de-sel",
          "ogs_version": "0.1.0",
          "category": "salt"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:fleur-de-sel\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Fleur de sel\"\n  },\n  \"category\": \"salt\",\n  \"description\": {\n    \"en\": \"A delicate, flaky sea salt harvested by hand from the surface of salt evaporation ponds, prized for its subtle mineral flavor and crunchy texture.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"salty\": 7,\n      \"bitter\": 0.5,\n      \"sweet\": 0.2,\n      \"sour\": 0,\n      \"umami\": 0\n    },\n    \"texture\": [\n      \"crunchy\",\n      \"flaky\"\n    ],\n    \"finish_length\": 3,\n    \"complexity\": 4\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"France, Guérande and other coastal regions\"\n    },\n    \"season\": {\n      \"en\": \"Summer\"\n    },\n    \"terroir_notes\": {\n      \"en\": \"Influenced by the mineral content of the local seawater and climate conditions.\"\n    }\n  },\n  \"notes\": {\n    \"en\": \"Used mostly as a finishing salt to enhance dishes with its delicate texture and light salty flavor.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:fleur-de-sel",
                "expected": "ogs:core:ingredient:fleur-de-sel",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "salt",
                "expected": "salt",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2514,
            "output_tokens": 291,
            "latency_ms": 3134.7,
            "usd": 0.001471,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:fleur-de-sel\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Fleur de sel\"\n  },\n  \"category\": \"salt\",\n  \"description\": {\n    \"en\": \"A delicate, hand-harvested sea salt known for its fine crystals and clean, slightly briny flavor.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"salty\": 9,\n      \"bitter\": 0.5,\n      \"sweet\": 0.2,\n      \"sour\": 0,\n      \"umami\": 0.1\n    },\n    \"aromatics\": [],\n    \"texture\": [\n      \"crystalline\",\n      \"fine\"\n    ],\n    \"finish_length\": 3,\n    \"complexity\": 2\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"Coastal regions of France\"\n    },\n    \"notes\": {\n      \"en\": \"Traditionally harvested on salt marshes by skilled workers using wooden tools.\"\n    }\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:fleur-de-sel",
                "expected": "ogs:core:ingredient:fleur-de-sel",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "salt",
                "expected": "salt",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2514,
            "output_tokens": 242,
            "latency_ms": 2494.7,
            "usd": 0.001393,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:fleur-de-sel\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Fleur de sel\"\n  },\n  \"category\": \"salt\",\n  \"description\": {\n    \"en\": \"Fleur de sel is a delicate and moist sea salt known for its fine, flaky crystals and subtle mineral flavor, often used as a finishing salt in culinary applications.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:fleur-de-sel",
                "expected": "ogs:core:ingredient:fleur-de-sel",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "salt",
                "expected": "salt",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2514,
            "output_tokens": 99,
            "latency_ms": 1362.9,
            "usd": 0.001164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2514.0,
        "output_tokens_mean": 210.667,
        "latency_ms_mean": 2330.767,
        "usd_sum": 0.004028
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-002",
      "category": "structured_output",
      "capability": "schema_valid_beverage",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.beverage",
        "vocab.beverage_types",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "367b58556573440d",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS beverage schema (`schema/beverage.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/beverage.schema.json\",\n  \"title\": \"OGS Beverage\",\n  \"description\": \"A beverage entity with type, structural attributes, sensory profile, and provenance.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"type\",\n    \"sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"type\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"wine\",\n        \"beer\",\n        \"spirit\",\n        \"cider\",\n        \"sake\",\n        \"other\"\n      ]\n    },\n    \"subtype\": {\n      \"type\": \"string\"\n    },\n    \"grape_varieties\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"string\"\n      }\n    },\n    \"vintage\": {\n      \"type\": \"integer\"\n    },\n    \"region\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"appellation\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"producer\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"abv\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"residual_sugar\": {\n      \"type\": \"number\",\n      \"minimum\": 0\n    },\n    \"serve_temperature_c\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"min\": {\n          \"type\": \"number\"\n        },\n        \"max\": {\n          \"type\": \"number\"\n        }\n      },\n      \"additionalProperties\": false,\n      \"description\": \"Recommended serving temperature range in degrees Celsius (SI canonical).\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n  
        \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is 
rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document describing a Sancerre (Sauvignon Blanc, Loire Valley) that validates against the OGS beverage schema. Respond with ONLY the JSON object, no markdown fences.\n\nINPUT:\n{\n  \"beverage_name\": \"Sancerre\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"sancerre-generic\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/beverage.schema.json",
        "required_fields": {
          "id": "ogs:core:beverage:sancerre-generic",
          "ogs_version": "0.1.0",
          "type": "wine"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:sancerre-generic\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Sancerre\",\n  \"type\": \"wine\",\n  \"grape_varieties\": [\n    \"Sauvignon Blanc\"\n  ],\n  \"region\": \"Loire Valley\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 0.5,\n      \"sour\": 7.5,\n      \"salty\": 0,\n      \"bitter\": 2,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 4,\n      \"acidity_perception\": 7.5,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"stone fruit\",\n        \"item\": \"peach\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"vegetal\",\n        \"item\": \"green bell pepper\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"floral\",\n        \"item\": \"elderflower\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"crisp\",\n      \"refreshing\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:sancerre-generic",
                "expected": "ogs:core:beverage:sancerre-generic",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "wine",
                "expected": "wine",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2515,
            "output_tokens": 382,
            "latency_ms": 3572.7,
            "usd": 0.001617,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:sancerre-generic\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Sancerre\",\n  \"type\": \"wine\",\n  \"grape_varieties\": [\n    \"Sauvignon Blanc\"\n  ],\n  \"region\": {\n    \"en\": \"Loire Valley\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 0.5,\n      \"sour\": 7,\n      \"salty\": 0,\n      \"bitter\": 1.5,\n      \"umami\": 0.5\n    },\n    \"structural\": {\n      \"body\": 3,\n      \"acidity_perception\": 7,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 2,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"green fruit\",\n        \"item\": \"green apple\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"herbaceous\",\n        \"item\": \"grassy\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"mineral\",\n        \"item\": \"flinty\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"flower\",\n        \"item\": \"elderflower\",\n        \"intensity\": 2\n      }\n    ],\n    \"finish_length\": 6,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:sancerre-generic",
                "expected": "ogs:core:beverage:sancerre-generic",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "wine",
                "expected": "wine",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2515,
            "output_tokens": 406,
            "latency_ms": 3544.1,
            "usd": 0.001656,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:sancerre-generic\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Sancerre\",\n  \"type\": \"wine\",\n  \"grape_varieties\": [\n    \"Sauvignon Blanc\"\n  ],\n  \"region\": \"Loire Valley\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 1,\n      \"sour\": 7,\n      \"salty\": 0,\n      \"bitter\": 3,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 4,\n      \"acidity_perception\": 7,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lime\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"berry\",\n        \"item\": \"gooseberry\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"green\",\n        \"item\": \"grass\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"white flowers\",\n        \"item\": \"acacia\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"crisp\"\n    ],\n    \"finish_length\": 6,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:sancerre-generic",
                "expected": "ogs:core:beverage:sancerre-generic",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "wine",
                "expected": "wine",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2515,
            "output_tokens": 367,
            "latency_ms": 3327.0,
            "usd": 0.001593,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2515.0,
        "output_tokens_mean": 385.0,
        "latency_ms_mean": 3481.267,
        "usd_sum": 0.004866
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-003",
      "category": "structured_output",
      "capability": "schema_valid_pairing",
      "difficulty": "expert",
      "cuisine_region": "french",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.pairing",
        "vocab.match_types",
        "vocab.explanation_codes",
        "vocab.grade_scale",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "2740dc70d150e12a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS pairing schema (`schema/pairing.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/pairing.schema.json\",\n  \"title\": \"OGS Pairing\",\n  \"description\": \"A scored, explained pairing between a dish and a beverage.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"dish_ref\",\n    \"beverage_ref\",\n    \"match_types\",\n    \"overall_score\",\n    \"grade\",\n    \"explanation_codes\",\n    \"confidence\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:pairing:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n     
 \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"dish_ref\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:dish:[a-z0-9-]+$\"\n    },\n    \"beverage_ref\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"match_types\": {\n      \"type\": \"array\",\n      \"minItems\": 1,\n      \"items\": {\n        \"$ref\": \"#/$defs/match_type_entry\"\n      }\n    },\n    \"overall_score\": {\n      \"type\": \"integer\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"grade\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"A+\",\n        \"A\",\n        \"B+\",\n        \"B\",\n        \"C+\",\n        \"C\",\n        \"D\",\n        \"F\"\n      ]\n    },\n    \"explanation_codes\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/explanation_code_entry\"\n      }\n    },\n    \"risks\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/risk_entry\"\n      }\n    },\n    \"confidence\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 1\n    },\n    \"evidence\": {\n      \"$ref\": \"#/$defs/evidence\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"match_type_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"type\",\n 
       \"score\"\n      ],\n      \"properties\": {\n        \"type\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"harmony\",\n            \"contrast\",\n            \"bridge\",\n            \"intensity_alignment\",\n            \"structural_balance\",\n            \"refresh\"\n          ]\n        },\n        \"score\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 100\n        },\n        \"description\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"primary\": {\n          \"type\": \"boolean\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"explanation_code_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"code\"\n      ],\n      \"properties\": {\n        \"code\": {\n          \"type\": \"string\"\n        },\n        \"weight\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 1\n        },\n        \"detail\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"risk_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"code\",\n        \"severity\"\n      ],\n      \"properties\": {\n        \"code\": {\n          \"type\": \"string\"\n        },\n        \"severity\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"low\",\n            \"medium\",\n            \"high\"\n          ]\n        },\n        \"description\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"mitigation\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"evidence\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"method\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"expert_panel\",\n            \"instrumental\",\n            \"computed\",\n            \"estimated\"\n  
        ]\n        },\n        \"sources\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n### OGS Explanation Codes\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a 
crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall flavor 
intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. 
`core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce an OGS pairing JSON document for the described match. It MUST validate against the OGS pairing schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"pairing_slug\": \"brie-champagne\",\n  \"dish_ref\": \"ogs:core:dish:brie-on-baguette\",\n  \"beverage_ref\": \"ogs:core:beverage:brut-champagne-nv\",\n  \"intent\": \"A classic refresh pairing driven by effervescence and acidity against creamy brie.\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/pairing.schema.json",
        "required_fields": {
          "id": "ogs:core:pairing:brie-champagne",
          "ogs_version": "0.1.0",
          "dish_ref": "ogs:core:dish:brie-on-baguette",
          "beverage_ref": "ogs:core:beverage:brut-champagne-nv"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:brie-champagne\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Brie on Baguette with Brut Champagne\"\n  },\n  \"dish_ref\": \"ogs:core:dish:brie-on-baguette\",\n  \"beverage_ref\": \"ogs:core:beverage:brut-champagne-nv\",\n  \"match_types\": [\n    {\n      \"type\": \"refresh\",\n      \"score\": 95,\n      \"description\": \"Effervescence and acidity in the champagne refresh the palate, balancing the creamy texture of brie.\",\n      \"primary\": true\n    },\n    {\n      \"type\": \"contrast\",\n      \"score\": 85,\n      \"description\": \"The acidity and bubbles contrast beautifully with the rich creaminess of the brie, creating a lively experience.\"\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 80,\n      \"description\": \"The light acidity and moderate body of the champagne structurally balance the creamy, fatty cheese.\"\n    }\n  ],\n  \"overall_score\": 88,\n  \"grade\": \"B+\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"REFRESH_EFFERVESCE\",\n      \"weight\": 0.5,\n      \"detail\": \"The champagne's carbonation physically cleanses the palate between creamy bites.\"\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.4,\n      \"detail\": \"High acidity in the champagne cuts through the creaminess of the brie, refreshing the mouthfeel.\"\n    },\n    {\n      \"code\": \"CONTRAST_RICH_CRISP\",\n      \"weight\": 0.3,\n      \"detail\": \"Rich fat from the brie contrasts with the crisp, bubbly nature of the champagne.\"\n    },\n    {\n      \"code\": \"STRUCT_ACIDITY_BALANCE\",\n      \"weight\": 0.2,\n      \"detail\": \"The acidity in the champagne balances the fat content in the cheese, avoiding heaviness.\"\n    }\n  ],\n  \"confidence\": 0.95,\n  \"description\": {\n    \"en\": \"A classic pairing where the brut champagne's effervescence and acidity refresh the palate and balance the creamy texture of brie on a 
baguette.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:brie-champagne",
                "expected": "ogs:core:pairing:brie-champagne",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:brie-on-baguette",
                "expected": "ogs:core:dish:brie-on-baguette",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:brut-champagne-nv",
                "expected": "ogs:core:beverage:brut-champagne-nv",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3380,
            "output_tokens": 501,
            "latency_ms": 5000.3,
            "usd": 0.002154,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:brie-champagne\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Brie on Baguette with Brut Champagne\",\n  \"dish_ref\": \"ogs:core:dish:brie-on-baguette\",\n  \"beverage_ref\": \"ogs:core:beverage:brut-champagne-nv\",\n  \"match_types\": [\n    {\n      \"type\": \"refresh\",\n      \"score\": 95,\n      \"description\": \"The effervescence of the champagne cleanses the palate of the creamy brie, while the high acidity refreshes and balances the richness.\",\n      \"primary\": true\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 85,\n      \"description\": \"Acidity in the champagne balances the fat in the creamy brie, providing a harmonious structural balance.\"\n    },\n    {\n      \"type\": \"harmony\",\n      \"score\": 80,\n      \"description\": \"The delicate flavors and texture of the brie complement the bright and crisp nature of the champagne.\"\n    }\n  ],\n  \"overall_score\": 90,\n  \"grade\": \"A\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"REFRESH_EFFERVESCE\",\n      \"weight\": 0.5,\n      \"detail\": \"Carbonation in Champagne scrubs and refreshes the palate after each bite of creamy brie.\"\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.3,\n      \"detail\": \"Champagne's high acidity cuts through and refreshes the richness of the brie cheese.\"\n    },\n    {\n      \"code\": \"STRUCT_ACIDITY_BALANCE\",\n      \"weight\": 0.2,\n      \"detail\": \"Acid in the beverage balances the creamy fat of the brie, providing structural harmony.\"\n    }\n  ],\n  \"confidence\": 0.95\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:brie-champagne",
                "expected": "ogs:core:pairing:brie-champagne",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:brie-on-baguette",
                "expected": "ogs:core:dish:brie-on-baguette",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:brut-champagne-nv",
                "expected": "ogs:core:beverage:brut-champagne-nv",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3380,
            "output_tokens": 418,
            "latency_ms": 3800.8,
            "usd": 0.002021,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:brie-champagne\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Brie on Baguette and Brut Champagne NV\",\n  \"dish_ref\": \"ogs:core:dish:brie-on-baguette\",\n  \"beverage_ref\": \"ogs:core:beverage:brut-champagne-nv\",\n  \"match_types\": [\n    {\n      \"type\": \"refresh\",\n      \"score\": 95,\n      \"description\": \"Effervescence in the champagne cleanses the palate between bites of creamy brie.\",\n      \"primary\": true\n    },\n    {\n      \"type\": \"refresh\",\n      \"score\": 90,\n      \"description\": \"High acidity in the champagne cuts through the richness of the brie, refreshing the palate.\"\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 85,\n      \"description\": \"The acidity balance and richness create a harmonious mouthfeel.\"\n    }\n  ],\n  \"overall_score\": 90,\n  \"grade\": \"A\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"REFRESH_EFFERVESCE\",\n      \"weight\": 0.5,\n      \"detail\": \"Champagne bubbles scrub the palate, refreshing after creamy brie.\"\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.4,\n      \"detail\": \"The high acidity of the brut champagne balances the creamy fat of the brie.\"\n    },\n    {\n      \"code\": \"STRUCT_ACIDITY_BALANCE\",\n      \"weight\": 0.3,\n      \"detail\": \"Acidity in the champagne provides a refreshing contrast to the dish’s richness.\"\n    }\n  ],\n  \"confidence\": 0.9,\n  \"notes\": \"A classic and elegant pairing where the champagne's effervescence and acidity perfectly refresh and balance the creamy texture and richness of brie cheese.\",\n  \"metadata\": {\n    \"created\": \"2024-06-15T12:00:00Z\",\n    \"source\": \"Expert Gastronomy Panel\",\n    \"license\": \"CC-BY-4.0\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:brie-champagne",
                "expected": "ogs:core:pairing:brie-champagne",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:brie-on-baguette",
                "expected": "ogs:core:dish:brie-on-baguette",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:brut-champagne-nv",
                "expected": "ogs:core:beverage:brut-champagne-nv",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3380,
            "output_tokens": 468,
            "latency_ms": 5053.4,
            "usd": 0.002101,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 3380.0,
        "output_tokens_mean": 462.333,
        "latency_ms_mean": 4618.167,
        "usd_sum": 0.006276
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-004",
      "category": "structured_output",
      "capability": "sensory_profile_json",
      "difficulty": "medium",
      "cuisine_region": "indian",
      "scorer": "json_match",
      "context_needs": [
        "scales.basic_tastes",
        "scales.structural",
        "vocab.aromatic_families",
        "vocab.qualifiers"
      ],
      "prompt_template_hash": "9bd6860a8c5c69ec",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Aromatic qualifiers\n\nfresh, dried, cooked, candied, fermented, zest, overripe\n\n---\n\nProduce a JSON object representing the sensory profile for ripe mango. The object must have keys basic_tastes (with sweet, sour, salty, bitter, umami), structural (with body, acidity_perception, tannin, astringency, alcohol_heat, effervescence), and aromatics (array of entries with family, group, item, intensity). All intensities on 0-10. 
Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"item\": \"ripe Alphonso mango, peak ripeness\"\n}\n\nANSWER FORMAT: json\n\nRESPONSE JSON SCHEMA:\n{\n  \"type\": \"object\",\n  \"required\": [\n    \"basic_tastes\",\n    \"structural\",\n    \"aromatics\"\n  ],\n  \"properties\": {\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"sweet\",\n        \"sour\",\n        \"salty\",\n        \"bitter\",\n        \"umami\"\n      ]\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"body\",\n        \"acidity_perception\",\n        \"tannin\",\n        \"astringency\",\n        \"alcohol_heat\",\n        \"effervescence\"\n      ]\n    },\n    \"aromatics\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"object\",\n        \"required\": [\n          \"family\",\n          \"group\",\n          \"item\",\n          \"intensity\"\n        ]\n      }\n    }\n  }\n}",
      "expected": {
        "value": {
          "basic_tastes": {
            "sweet": 8.5,
            "sour": 3.0,
            "salty": 0,
            "bitter": 0.5,
            "umami": 0.5
          },
          "structural": {
            "body": 5.5,
            "acidity_perception": 3.0,
            "tannin": 0,
            "astringency": 0.5,
            "alcohol_heat": 0,
            "effervescence": 0
          }
        },
        "required_keys": [
          "basic_tastes.sweet",
          "basic_tastes.sour",
          "structural.body",
          "structural.acidity_perception",
          "structural.tannin"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 8,\n    \"sour\": 3,\n    \"salty\": 0,\n    \"bitter\": 0.5,\n    \"umami\": 0.5\n  },\n  \"structural\": {\n    \"body\": 6,\n    \"acidity_perception\": 4,\n    \"tannin\": 0,\n    \"astringency\": 0,\n    \"alcohol_heat\": 0,\n    \"effervescence\": 0\n  },\n  \"aromatics\": [\n    {\n      \"family\": \"fruit\",\n      \"group\": \"tropical\",\n      \"item\": \"mango\",\n      \"intensity\": 7\n    },\n    {\n      \"family\": \"floral\",\n      \"group\": \"flower\",\n      \"item\": \"jasmine\",\n      \"intensity\": 2\n    },\n    {\n      \"family\": \"spice\",\n      \"group\": \"sweet spice\",\n      \"item\": \"clove\",\n      \"intensity\": 1.5\n    },\n    {\n      \"family\": \"fruit\",\n      \"group\": \"citrus\",\n      \"item\": \"orange zest\",\n      \"intensity\": 1.5\n    },\n    {\n      \"family\": \"dairy\",\n      \"group\": \"creamy\",\n      \"item\": \"buttery\",\n      \"intensity\": 1\n    }\n  ]\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.sweet": {
                "predicted": 8,
                "expected": 8.5,
                "match": true
              },
              "basic_tastes.sour": {
                "predicted": 3,
                "expected": 3.0,
                "match": true
              },
              "structural.body": {
                "predicted": 6,
                "expected": 5.5,
                "match": true
              },
              "structural.acidity_perception": {
                "predicted": 4,
                "expected": 3.0,
                "match": true
              },
              "structural.tannin": {
                "predicted": 0,
                "expected": 0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 713,
            "output_tokens": 305,
            "latency_ms": 3189.0,
            "usd": 0.000773,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 8.5,\n    \"sour\": 3.5,\n    \"salty\": 0,\n    \"bitter\": 0.5,\n    \"umami\": 0.5\n  },\n  \"structural\": {\n    \"body\": 6,\n    \"acidity_perception\": 3.5,\n    \"tannin\": 0,\n    \"astringency\": 0,\n    \"alcohol_heat\": 0,\n    \"effervescence\": 0\n  },\n  \"aromatics\": [\n    {\n      \"family\": \"fruit\",\n      \"group\": \"fresh\",\n      \"item\": \"mango\",\n      \"intensity\": 8\n    },\n    {\n      \"family\": \"floral\",\n      \"group\": \"fresh\",\n      \"item\": \"jasmine\",\n      \"intensity\": 3\n    },\n    {\n      \"family\": \"herbal\",\n      \"group\": \"fresh\",\n      \"item\": \"green leaf\",\n      \"intensity\": 2\n    },\n    {\n      \"family\": \"spice\",\n      \"group\": \"fresh\",\n      \"item\": \"cardamom\",\n      \"intensity\": 1.5\n    }\n  ]\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.sweet": {
                "predicted": 8.5,
                "expected": 8.5,
                "match": true
              },
              "basic_tastes.sour": {
                "predicted": 3.5,
                "expected": 3.0,
                "match": true
              },
              "structural.body": {
                "predicted": 6,
                "expected": 5.5,
                "match": true
              },
              "structural.acidity_perception": {
                "predicted": 3.5,
                "expected": 3.0,
                "match": true
              },
              "structural.tannin": {
                "predicted": 0,
                "expected": 0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 713,
            "output_tokens": 271,
            "latency_ms": 3015.4,
            "usd": 0.000719,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 8.5,\n    \"sour\": 3.5,\n    \"salty\": 0,\n    \"bitter\": 0.5,\n    \"umami\": 0.5\n  },\n  \"structural\": {\n    \"body\": 6,\n    \"acidity_perception\": 4,\n    \"tannin\": 0,\n    \"astringency\": 0,\n    \"alcohol_heat\": 0,\n    \"effervescence\": 0\n  },\n  \"aromatics\": [\n    {\n      \"family\": \"fruit\",\n      \"group\": \"fresh\",\n      \"item\": \"mango\",\n      \"intensity\": 8\n    },\n    {\n      \"family\": \"fruit\",\n      \"group\": \"cooked\",\n      \"item\": \"pineapple\",\n      \"intensity\": 3\n    },\n    {\n      \"family\": \"floral\",\n      \"group\": \"fresh\",\n      \"item\": \"jasmine\",\n      \"intensity\": 3\n    },\n    {\n      \"family\": \"herbal\",\n      \"group\": \"fresh\",\n      \"item\": \"lemongrass\",\n      \"intensity\": 2\n    },\n    {\n      \"family\": \"confection\",\n      \"group\": \"candied\",\n      \"item\": \"honey\",\n      \"intensity\": 4\n    }\n  ]\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.sweet": {
                "predicted": 8.5,
                "expected": 8.5,
                "match": true
              },
              "basic_tastes.sour": {
                "predicted": 3.5,
                "expected": 3.0,
                "match": true
              },
              "structural.body": {
                "predicted": 6,
                "expected": 5.5,
                "match": true
              },
              "structural.acidity_perception": {
                "predicted": 4,
                "expected": 3.0,
                "match": true
              },
              "structural.tannin": {
                "predicted": 0,
                "expected": 0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 713,
            "output_tokens": 304,
            "latency_ms": 3419.7,
            "usd": 0.000772,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 713.0,
        "output_tokens_mean": 293.333,
        "latency_ms_mean": 3208.033,
        "usd_sum": 0.002264
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-005",
      "category": "structured_output",
      "capability": "schema_valid_dish",
      "difficulty": "expert",
      "cuisine_region": "french",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.dish",
        "vocab.component_roles",
        "vocab.cooking_methods",
        "vocab.courses",
        "vocab.service_temperatures",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "9caf9bc474b2464d",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS dish schema (`schema/dish.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/dish.schema.json\",\n  \"title\": \"OGS Dish\",\n  \"description\": \"A composed dish with components, cooking methods, and overall sensory profile.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"components\",\n    \"overall_sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:dish:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"cuisine\": {\n      \"type\": \"string\",\n      \"description\": \"Free-text cuisine name (v0.1) OR a cuisine entity reference matching ogs:<ns>:cuisine:<id> (v0.2+).\"\n    },\n    \"course\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"amuse\",\n        \"appetizer\",\n        \"soup\",\n        \"salad\",\n        \"fish\",\n        \"main\",\n        \"cheese\",\n        \"dessert\",\n        \"petit_four\"\n      ]\n    },\n    \"service_temperature\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"frozen\",\n        \"cold\",\n        \"cool\",\n        \"room\",\n        \"warm\",\n        \"hot\",\n        \"very_hot\"\n      ]\n    },\n    \"components\": {\n      \"type\": \"array\",\n      \"minItems\": 1,\n      \"items\": {\n        \"$ref\": \"#/$defs/component\"\n      }\n    },\n    \"overall_sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              
\"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"component\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"role\",\n        \"ingredient_ref\"\n      ],\n      \"properties\": {\n        \"role\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"main_protein\",\n            \"main_vegetable\",\n            \"starch\",\n            \"sauce\",\n            \"fat_component\",\n            \"acid_component\",\n            
\"aromatic_garnish\",\n            \"seasoning\",\n            \"textural_element\"\n          ]\n        },\n        \"ingredient_ref\": {\n          \"type\": \"string\",\n          \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\"\n        },\n        \"cooking_method\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"raw\",\n            \"seared\",\n            \"grilled\",\n            \"roasted\",\n            \"braised\",\n            \"poached\",\n            \"steamed\",\n            \"fried\",\n            \"confit\",\n            \"smoked\",\n            \"fermented\",\n            \"cured\",\n            \"pickled\",\n            \"dehydrated\",\n            \"sous_vide\",\n            \"blanched\",\n            \"sauteed\",\n            \"baked\",\n            \"charred\",\n            \"emulsified\"\n          ]\n        },\n        \"maillard_index\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 5\n        },\n        \"reduction_level\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 5\n        },\n        \"proportion\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"dominant\",\n            \"significant\",\n            \"accent\",\n            \"trace\"\n          ]\n        },\n        \"sensory_contribution\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"basic_tastes\": {\n              \"$ref\": \"#/$defs/basic_tastes\"\n            },\n            \"aromatics\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/$defs/aromatic_entry\"\n              }\n            },\n            \"texture\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"additionalProperties\": true\n        },\n        \"notes\": {\n          
\"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": 
\"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, 
fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Service temperatures\n\nfrozen, cold, cool, room, warm, hot, very_hot\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce an OGS dish JSON document for 'moules marinieres' (mussels steamed with white wine, shallots, parsley, butter). It must validate against the OGS dish schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"dish_slug\": \"moules-marinieres\",\n  \"cuisine\": \"french\",\n  \"course\": \"main\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/dish.schema.json",
        "required_fields": {
          "id": "ogs:core:dish:moules-marinieres",
          "ogs_version": "0.1.0"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:custom:dish:moules-marinieres\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Moules Marinieres\"\n  },\n  \"cuisine\": \"french\",\n  \"course\": \"main\",\n  \"components\": [\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:mussels\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 6,\n          \"salty\": 4\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"marine\",\n            \"group\": \"shellfish\",\n            \"item\": \"mussels\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\"tender\", \"slightly chewy\"]\n      }\n    },\n    {\n      \"role\": \"acid_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:white-wine\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sour\": 4\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"white wine\",\n            \"item\": \"white wine\",\n            \"intensity\": 5\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:shallot\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 3,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"spice\",\n            \"group\": \"onion\",\n            \"item\": \"shallot\",\n            \"intensity\": 6\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"fat_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:butter\",\n      \"proportion\": 
\"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 2\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"dairy\",\n            \"group\": \"butter\",\n            \"item\": \"butter\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\n          \"creamy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:parsley\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"herbal\",\n            \"group\": \"parsley\",\n            \"item\": \"parsley\",\n            \"intensity\": 3,\n            \"qualifier\": \"fresh\"\n          }\n        ]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 4,\n      \"salty\": 4,\n      \"bitter\": 1,\n      \"umami\": 6\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"marine\",\n        \"group\": \"shellfish\",\n        \"item\": \"mussels\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"white wine\",\n        \"item\": \"white wine\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"onion\",\n        \"item\": \"shallot\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"dairy\",\n        \"group\": \"butter\",\n        \"item\": \"butter\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"parsley\",\n        \"item\": \"parsley\",\n        \"intensity\": 3,\n        \"qualifier\": \"fresh\"\n      }\n    ],\n    \"texture\": [\n      \"tender\",\n      \"slightly chewy\",\n      \"creamy\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": false,
            "required_fields": {
              "id": {
                "predicted": "ogs:custom:dish:moules-marinieres",
                "expected": "ogs:core:dish:moules-marinieres",
                "match": false
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3080,
            "output_tokens": 976,
            "latency_ms": 11242.9,
            "usd": 0.002794,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:moules-marinieres\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Moules Marinieres\"\n  },\n  \"cuisine\": \"french\",\n  \"course\": \"main\",\n  \"service_temperature\": \"hot\",\n  \"components\": [\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:blue-mussel\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 6,\n          \"salty\": 2\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"marine\",\n            \"group\": \"shellfish\",\n            \"item\": \"mussel\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\n          \"tender\",\n          \"slightly_chewy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:flat-leaf-parsley\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"herbal\",\n            \"group\": \"parsley\",\n            \"item\": \"flat-leaf parsley\",\n            \"intensity\": 5,\n            \"qualifier\": \"fresh\"\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"seasoning\",\n      \"ingredient_ref\": \"ogs:core:ingredient:sea-salt\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"salty\": 5\n        }\n      }\n    },\n    {\n      \"role\": \"sauce\",\n      \"ingredient_ref\": \"ogs:core:ingredient:dry-white-wine\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sour\": 4\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n        
    \"group\": \"white wine\",\n            \"item\": \"dry white wine\",\n            \"intensity\": 5\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:shallot\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 3,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"onion\",\n            \"item\": \"shallot\",\n            \"intensity\": 6\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"fat_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:butter\",\n      \"cooking_method\": \"emulsified\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 2,\n          \"umami\": 3\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"dairy\",\n            \"group\": \"butter\",\n            \"item\": \"butter\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\n          \"creamy\",\n          \"smooth\"\n        ]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 4,\n      \"salty\": 4,\n      \"bitter\": 0,\n      \"umami\": 6\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"marine\",\n        \"group\": \"shellfish\",\n        \"item\": \"mussel\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"parsley\",\n        \"item\": \"flat-leaf parsley\",\n        \"intensity\": 5,\n        \"qualifier\": \"fresh\"\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"white wine\",\n        \"item\": \"dry white wine\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": 
\"savory\",\n        \"group\": \"onion\",\n        \"item\": \"shallot\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"dairy\",\n        \"group\": \"butter\",\n        \"item\": \"butter\",\n        \"intensity\": 7\n      }\n    ],\n    \"texture\": [\n      \"tender\",\n      \"slightly_chewy\",\n      \"creamy\",\n      \"smooth\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:moules-marinieres",
                "expected": "ogs:core:dish:moules-marinieres",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3080,
            "output_tokens": 1099,
            "latency_ms": 11224.6,
            "usd": 0.00299,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:custom:dish:moules-marinieres\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"moules marinieres\",\n  \"cuisine\": \"french\",\n  \"course\": \"main\",\n  \"service_temperature\": \"hot\",\n  \"components\": [\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:mussels\",\n      \"cooking_method\": \"steamed\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 7,\n          \"salty\": 3\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"marine\",\n            \"group\": \"shellfish\",\n            \"item\": \"mussel\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\n          \"tender\",\n          \"slightly_chewy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"acid_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:white-wine\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sour\": 4,\n          \"sweet\": 1.5,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"white fruit\",\n            \"item\": \"apple\",\n            \"qualifier\": \"fresh\",\n            \"intensity\": 4\n          },\n          {\n            \"family\": \"floral\",\n            \"group\": \"flower\",\n            \"item\": \"honeysuckle\",\n            \"intensity\": 2.5\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"seasoning\",\n      \"ingredient_ref\": \"ogs:core:ingredient:shallots\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 3,\n          \"bitter\": 0.5\n        },\n        \"aromatics\": [\n          {\n            
\"family\": \"vegetal\",\n            \"group\": \"allium\",\n            \"item\": \"shallot\",\n            \"intensity\": 5\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"fat_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:butter\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 3,\n          \"umami\": 2\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"dairy\",\n            \"group\": \"buttery\",\n            \"item\": \"butter\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\n          \"creamy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:parsley\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"herbal\",\n            \"group\": \"green herb\",\n            \"item\": \"parsley\",\n            \"intensity\": 3\n          }\n        ]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 3,\n      \"salty\": 3,\n      \"bitter\": 0.5,\n      \"umami\": 7\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"marine\",\n        \"group\": \"shellfish\",\n        \"item\": \"mussel\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"white fruit\",\n        \"item\": \"apple\",\n        \"qualifier\": \"fresh\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"vegetal\",\n        \"group\": \"allium\",\n        \"item\": \"shallot\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"dairy\",\n        \"group\": \"buttery\",\n        \"item\": \"butter\",\n        \"intensity\": 6\n      },\n      {\n 
       \"family\": \"herbal\",\n        \"group\": \"green herb\",\n        \"item\": \"parsley\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"tender\",\n      \"slightly_chewy\",\n      \"creamy\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 7\n  }\n}",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": false,
            "required_fields": {
              "id": {
                "predicted": "ogs:custom:dish:moules-marinieres",
                "expected": "ogs:core:dish:moules-marinieres",
                "match": false
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3080,
            "output_tokens": 1056,
            "latency_ms": 10763.7,
            "usd": 0.002922,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.2887,
        "sem": 0.1667,
        "ci95_lo": 0.34,
        "ci95_hi": 0.9933,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 3080.0,
        "output_tokens_mean": 1043.667,
        "latency_ms_mean": 11077.067,
        "usd_sum": 0.008706
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-006",
      "category": "structured_output",
      "capability": "schema_valid_ingredient",
      "difficulty": "medium",
      "cuisine_region": "iranian",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.ingredient",
        "vocab.ingredient_categories",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "bb4d0fcee2d1e751",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS ingredient schema (`schema/ingredient.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/ingredient.schema.json\",\n  \"title\": \"OGS Ingredient\",\n  \"description\": \"An ingredient entity with sensory profile, category, and provenance metadata.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"category\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\",\n      \"description\": \"OGS identifier in the format ogs:<namespace>:ingredient:<id>\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\",\n      \"description\": \"OGS specification version (0.1.x or 0.2.x)\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\",\n      \"description\": \"Authoritative / endonym language of this entity's human-readable fields.\"\n    
},\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      },\n      \"description\": \"Advisory list of languages covered by this entity's LString fields.\"\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"category\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"protein\",\n        \"vegetable\",\n        \"fruit\",\n        \"grain\",\n        \"dairy\",\n        \"fat\",\n        \"herb\",\n        \"spice\",\n        \"fungus\",\n        \"legume\",\n        \"nut\",\n        \"seed\",\n        \"sweetener\",\n        \"acid\",\n        \"salt\",\n        \"other\"\n      ]\n    },\n    \"subcategory\": {\n      \"type\": \"string\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"origin\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"region\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"season\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"terroir_notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic 
tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document that describes 'saffron' as an OGS ingredient. It MUST validate against the OGS ingredient schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"ingredient_name\": \"saffron\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"saffron-stigma\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/ingredient.schema.json",
        "required_fields": {
          "id": "ogs:core:ingredient:saffron-stigma",
          "ogs_version": "0.1.0",
          "category": "spice"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:saffron-stigma\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"saffron\",\n  \"category\": \"spice\",\n  \"description\": \"Dried stigmas of the Crocus sativus flower, valued for its unique aroma, flavor, and vivid golden-yellow color in culinary uses.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"bitter\": 2,\n      \"sweet\": 1,\n      \"sour\": 0,\n      \"salty\": 0,\n      \"umami\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"floral\",\n        \"group\": \"spice-floral\",\n        \"item\": \"saffron-floral\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"earthy\",\n        \"item\": \"hay\",\n        \"intensity\": 3\n      }\n    ],\n    \"finish_length\": 6,\n    \"complexity\": 7\n  },\n  \"origin\": {\n    \"region\": \"Mediterranean\",\n    \"season\": \"Autumn\",\n    \"terroir_notes\": \"Grown in well-drained soils with dry summers and mild winters.\"\n  },\n  \"notes\": \"Saffron is one of the most expensive spices by weight, used primarily for adding aroma, flavor, and color to dishes.\"\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:saffron-stigma",
                "expected": "ogs:core:ingredient:saffron-stigma",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "spice",
                "expected": "spice",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2502,
            "output_tokens": 316,
            "latency_ms": 3877.6,
            "usd": 0.001506,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:saffron-stigma\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"saffron\",\n  \"category\": \"spice\",\n  \"description\": \"Saffron is the dried stigma of the Crocus sativus flower, known for its distinctive aroma, flavor, and vibrant yellow-orange color used in cooking.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"bitter\": 3,\n      \"umami\": 1,\n      \"sour\": 0,\n      \"salty\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"floral\",\n        \"group\": \"flower\",\n        \"item\": \"crocus\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"herbal spice\",\n        \"item\": \"saffron aroma\",\n        \"intensity\": 8\n      }\n    ],\n    \"complexity\": 7,\n    \"finish_length\": 6\n  },\n  \"origin\": {\n    \"region\": \"Mediterranean, Iran, India\",\n    \"season\": \"Autumn\",\n    \"terroir_notes\": \"Grows in arid climates with well-drained soil; harvesting occurs by hand in early morning.\"\n  },\n  \"notes\": \"Saffron is the most expensive spice by weight due to the labor-intensive harvesting process. Used to impart a golden yellow color and a unique aroma and flavor to dishes.\"\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:saffron-stigma",
                "expected": "ogs:core:ingredient:saffron-stigma",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "spice",
                "expected": "spice",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2502,
            "output_tokens": 337,
            "latency_ms": 3508.2,
            "usd": 0.00154,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:saffron-stigma\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"saffron\",\n  \"category\": \"spice\",\n  \"description\": \"Saffron is a highly prized spice derived from the dried stigmas of the Crocus sativus flower, known for its distinctive aroma, flavor, and vivid yellow-orange color.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 1,\n      \"bitter\": 1,\n      \"umami\": 0,\n      \"salty\": 0,\n      \"sour\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"floral\",\n        \"group\": \"floral\",\n        \"item\": \"hay\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"herbal\",\n        \"item\": \"dry green\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"earth\",\n        \"item\": \"earthy\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\"powdery\"],\n    \"finish_length\": 6,\n    \"complexity\": 7\n  },\n  \"origin\": {\n    \"region\": \"Mediterranean and Southwest Asia\",\n    \"season\": \"Autumn\",\n    \"terroir_notes\": \"Thrives in dry, warm climates with well-drained soil; terroir influences subtle aromatic variations.\"\n  },\n  \"metadata\": {\n    \"source\": \"Expert culinary sensory profiles\",\n    \"license\": \"CC0\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:saffron-stigma",
                "expected": "ogs:core:ingredient:saffron-stigma",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "spice",
                "expected": "spice",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2502,
            "output_tokens": 365,
            "latency_ms": 3689.7,
            "usd": 0.001585,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2502.0,
        "output_tokens_mean": 339.333,
        "latency_ms_mean": 3691.833,
        "usd_sum": 0.004631
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-007",
      "category": "structured_output",
      "capability": "schema_valid_ingredient",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.ingredient",
        "vocab.ingredient_categories",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "0bc2e49ee6c1093e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS ingredient schema (`schema/ingredient.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/ingredient.schema.json\",\n  \"title\": \"OGS Ingredient\",\n  \"description\": \"An ingredient entity with sensory profile, category, and provenance metadata.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"category\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\",\n      \"description\": \"OGS identifier in the format ogs:<namespace>:ingredient:<id>\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\",\n      \"description\": \"OGS specification version (0.1.x or 0.2.x)\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\",\n      \"description\": \"Authoritative / endonym language of this entity's human-readable fields.\"\n    
},\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      },\n      \"description\": \"Advisory list of languages covered by this entity's LString fields.\"\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"category\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"protein\",\n        \"vegetable\",\n        \"fruit\",\n        \"grain\",\n        \"dairy\",\n        \"fat\",\n        \"herb\",\n        \"spice\",\n        \"fungus\",\n        \"legume\",\n        \"nut\",\n        \"seed\",\n        \"sweetener\",\n        \"acid\",\n        \"salt\",\n        \"other\"\n      ]\n    },\n    \"subcategory\": {\n      \"type\": \"string\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"origin\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"region\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"season\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"terroir_notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic 
tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document that describes 'miso (red)' as an OGS ingredient. It MUST validate against the OGS ingredient schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"ingredient_name\": \"miso (red)\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"aka-miso\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/ingredient.schema.json",
        "required_fields": {
          "id": "ogs:core:ingredient:aka-miso",
          "ogs_version": "0.1.0",
          "category": "other"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:aka-miso\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"miso (red)\"\n  },\n  \"category\": \"other\",\n  \"description\": {\n    \"en\": \"Red miso, or aka miso, is a type of fermented soybean paste with a robust and salty umami flavor. It is darker in color and aged longer than white miso, resulting in a deeper and more intense taste.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 1,\n      \"salty\": 7,\n      \"bitter\": 1,\n      \"umami\": 8\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 2,\n      \"tannin\": 0,\n      \"astringency\": 1,\n      \"alcohol_heat\": 0,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"savory\",\n        \"group\": \"fermentation\",\n        \"item\": \"soy\",\n        \"intensity\": 8\n      },\n      {\n        \"family\": \"savory\",\n        \"group\": \"fermentation\",\n        \"item\": \"umami\",\n        \"intensity\": 7\n      }\n    ],\n    \"finish_length\": 6,\n    \"complexity\": 7\n  },\n  \"notes\": {\n    \"en\": \"Red miso is commonly used in Japanese cuisine for soups, marinades, and sauces, valued for its deep and rich flavor.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:aka-miso",
                "expected": "ogs:core:ingredient:aka-miso",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "other",
                "expected": "other",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2502,
            "output_tokens": 363,
            "latency_ms": 3773.6,
            "usd": 0.001582,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:aka-miso\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"miso (red)\"\n  },\n  \"category\": \"other\",\n  \"description\": {\n    \"en\": \"Red miso is a fermented soybean paste known for its strong, savory umami flavor and deep reddish-brown color, commonly used in Japanese cuisine.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 3,\n      \"salty\": 6,\n      \"bitter\": 2,\n      \"umami\": 8\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"savory\",\n        \"group\": \"fermented\",\n        \"item\": \"soy\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"fermented\",\n        \"item\": \"fermented soybean\",\n        \"intensity\": 6\n      }\n    ],\n    \"complexity\": 7\n  },\n  \"notes\": {\n    \"en\": \"Red miso is aged longer than white miso, leading to a stronger flavor and darker color.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:aka-miso",
                "expected": "ogs:core:ingredient:aka-miso",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "other",
                "expected": "other",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2502,
            "output_tokens": 272,
            "latency_ms": 2844.9,
            "usd": 0.001436,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:aka-miso\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"miso (red)\"\n  },\n  \"category\": \"other\",\n  \"description\": {\n    \"en\": \"Red miso, a fermented soybean paste with a strong umami flavor and reddish-brown color, commonly used in Japanese cuisine.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 1,\n      \"sour\": 1,\n      \"salty\": 6,\n      \"bitter\": 1,\n      \"umami\": 8\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"savory\",\n        \"group\": \"fermentation\",\n        \"item\": \"fermented\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"soy\",\n        \"item\": \"soybean\",\n        \"intensity\": 4\n      }\n    ],\n    \"finish_length\": 6,\n    \"complexity\": 7\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:aka-miso",
                "expected": "ogs:core:ingredient:aka-miso",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "other",
                "expected": "other",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2502,
            "output_tokens": 243,
            "latency_ms": 2444.7,
            "usd": 0.00139,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2502.0,
        "output_tokens_mean": 292.667,
        "latency_ms_mean": 3021.067,
        "usd_sum": 0.004408
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-008",
      "category": "structured_output",
      "capability": "schema_valid_ingredient",
      "difficulty": "hard",
      "cuisine_region": "turkish",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.ingredient",
        "vocab.ingredient_categories",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "791ac1beebd95233",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS ingredient schema (`schema/ingredient.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/ingredient.schema.json\",\n  \"title\": \"OGS Ingredient\",\n  \"description\": \"An ingredient entity with sensory profile, category, and provenance metadata.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"category\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\",\n      \"description\": \"OGS identifier in the format ogs:<namespace>:ingredient:<id>\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\",\n      \"description\": \"OGS specification version (0.1.x or 0.2.x)\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\",\n      \"description\": \"Authoritative / endonym language of this entity's human-readable fields.\"\n    
},\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      },\n      \"description\": \"Advisory list of languages covered by this entity's LString fields.\"\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"category\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"protein\",\n        \"vegetable\",\n        \"fruit\",\n        \"grain\",\n        \"dairy\",\n        \"fat\",\n        \"herb\",\n        \"spice\",\n        \"fungus\",\n        \"legume\",\n        \"nut\",\n        \"seed\",\n        \"sweetener\",\n        \"acid\",\n        \"salt\",\n        \"other\"\n      ]\n    },\n    \"subcategory\": {\n      \"type\": \"string\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"origin\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"region\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"season\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"terroir_notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic 
tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document that describes 'Urfa chili' as an OGS ingredient. It MUST validate against the OGS ingredient schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"ingredient_name\": \"Urfa chili\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"urfa-biber\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/ingredient.schema.json",
        "required_fields": {
          "id": "ogs:core:ingredient:urfa-biber",
          "ogs_version": "0.1.0",
          "category": "spice"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:urfa-biber\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Urfa chili\"\n  },\n  \"category\": \"spice\",\n  \"description\": {\n    \"en\": \"Urfa chili, also known as Urfa biber, is a dried chili pepper from the Urfa region in Turkey, known for its smoky, raisin-like flavor with moderate heat.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 2,\n      \"salty\": 0,\n      \"bitter\": 2,\n      \"umami\": 1\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried fruit\",\n        \"item\": \"raisin\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"smoky\",\n        \"item\": \"smoky\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"heat\",\n        \"item\": \"chili heat\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\n      \"powdery\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"Urfa region, Turkey\"\n    },\n    \"season\": {\n      \"en\": \"Autumn\"\n    },\n    \"terroir_notes\": {\n      \"en\": \"Grown in the arid climate of southeastern Turkey with sunny days and cool nights, enhancing its smoky and rich flavors.\"\n    }\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:urfa-biber",
                "expected": "ogs:core:ingredient:urfa-biber",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "spice",
                "expected": "spice",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2501,
            "output_tokens": 388,
            "latency_ms": 3895.7,
            "usd": 0.001621,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:urfa-biber\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Urfa chili\",\n  \"category\": \"spice\",\n  \"description\": \"Urfa chili, also known as Urfa biber, is a dried Turkish chili pepper with a slightly smoky, raisin-like flavor and moderate heat.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 1,\n      \"salty\": 0,\n      \"bitter\": 1,\n      \"umami\": 1\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"spice\",\n        \"group\": \"chili\",\n        \"item\": \"smoky\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried fruit\",\n        \"item\": \"raisin\",\n        \"intensity\": 4\n      }\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:urfa-biber",
                "expected": "ogs:core:ingredient:urfa-biber",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "spice",
                "expected": "spice",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2501,
            "output_tokens": 238,
            "latency_ms": 2620.6,
            "usd": 0.001381,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:urfa-biber\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Urfa chili\",\n    \"tr\": \"Urfa biber\"\n  },\n  \"category\": \"spice\",\n  \"description\": {\n    \"en\": \"A type of chili pepper from the Urfa region in Turkey, known for its smoky, raisin-like flavor with moderate heat.\",\n    \"tr\": \"Türkiye'nin Urfa bölgesinden bir biber türü, dumanlı ve kuru üzüm gibi tatları ile orta derecede acılığa sahiptir.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 2,\n      \"salty\": 0,\n      \"bitter\": 1,\n      \"umami\": 1\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried fruit\",\n        \"item\": \"raisin\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"chili\",\n        \"item\": \"smoky chili\",\n        \"intensity\": 7\n      }\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"Urfa, Turkey\",\n      \"tr\": \"Urfa, Türkiye\"\n    }\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:urfa-biber",
                "expected": "ogs:core:ingredient:urfa-biber",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "spice",
                "expected": "spice",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2501,
            "output_tokens": 330,
            "latency_ms": 4160.9,
            "usd": 0.001528,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2501.0,
        "output_tokens_mean": 318.667,
        "latency_ms_mean": 3559.067,
        "usd_sum": 0.00453
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-009",
      "category": "structured_output",
      "capability": "schema_valid_ingredient",
      "difficulty": "easy",
      "cuisine_region": "american",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.ingredient",
        "vocab.ingredient_categories",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "a6a51cef64e43626",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS ingredient schema (`schema/ingredient.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/ingredient.schema.json\",\n  \"title\": \"OGS Ingredient\",\n  \"description\": \"An ingredient entity with sensory profile, category, and provenance metadata.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"category\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\",\n      \"description\": \"OGS identifier in the format ogs:<namespace>:ingredient:<id>\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\",\n      \"description\": \"OGS specification version (0.1.x or 0.2.x)\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\",\n      \"description\": \"Authoritative / endonym language of this entity's human-readable fields.\"\n    
},\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      },\n      \"description\": \"Advisory list of languages covered by this entity's LString fields.\"\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"category\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"protein\",\n        \"vegetable\",\n        \"fruit\",\n        \"grain\",\n        \"dairy\",\n        \"fat\",\n        \"herb\",\n        \"spice\",\n        \"fungus\",\n        \"legume\",\n        \"nut\",\n        \"seed\",\n        \"sweetener\",\n        \"acid\",\n        \"salt\",\n        \"other\"\n      ]\n    },\n    \"subcategory\": {\n      \"type\": \"string\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"origin\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"region\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"season\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"terroir_notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic 
tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document that describes 'Meyer lemon' as an OGS ingredient. It MUST validate against the OGS ingredient schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"ingredient_name\": \"Meyer lemon\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"meyer-lemon\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/ingredient.schema.json",
        "required_fields": {
          "id": "ogs:core:ingredient:meyer-lemon",
          "ogs_version": "0.1.0",
          "category": "fruit"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:meyer-lemon\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Meyer lemon\",\n  \"category\": \"fruit\",\n  \"description\": \"A type of lemon that is sweeter and less acidic than regular lemons, often used for its fragrant zest and juice.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 7,\n      \"salty\": 0,\n      \"bitter\": 1,\n      \"umami\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"citrus blossom\",\n        \"item\": \"orange blossom\",\n        \"intensity\": 3\n      }\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 5\n  },\n  \"origin\": {\n    \"region\": \"China\",\n    \"season\": \"Winter to early spring\",\n    \"terroir_notes\": \"Grows well in subtropical climates with rich soil.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:meyer-lemon",
                "expected": "ogs:core:ingredient:meyer-lemon",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "fruit",
                "expected": "fruit",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2501,
            "output_tokens": 272,
            "latency_ms": 3051.4,
            "usd": 0.001436,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:meyer-lemon\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Meyer lemon\"\n  },\n  \"category\": \"fruit\",\n  \"description\": {\n    \"en\": \"A hybrid citrus fruit with a thin, smooth, deep yellow to orange rind and a sweet, less acidic flavor compared to regular lemons.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 4,\n      \"sour\": 6,\n      \"salty\": 0,\n      \"bitter\": 1,\n      \"umami\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"flower\",\n        \"item\": \"orange blossom\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"herb\",\n        \"item\": \"lemon balm\",\n        \"intensity\": 2\n      }\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"China\"\n    },\n    \"season\": {\n      \"en\": \"Winter to early spring\"\n    }\n  },\n  \"notes\": {\n    \"en\": \"Meyer lemon is prized for its aromatic zest and sweeter, less acidic juice, often used in desserts and cocktails.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:meyer-lemon",
                "expected": "ogs:core:ingredient:meyer-lemon",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "fruit",
                "expected": "fruit",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2501,
            "output_tokens": 354,
            "latency_ms": 3721.3,
            "usd": 0.001567,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:meyer-lemon\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Meyer lemon\",\n  \"category\": \"fruit\",\n  \"description\": \"A citrus hybrid fruit, sweeter and less acidic than common lemons, with aromatic zest and juicy flesh.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 7,\n      \"salty\": 0,\n      \"bitter\": 1,\n      \"umami\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon zest\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"blossom\",\n        \"item\": \"orange blossom\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\n      \"juicy\",\n      \"smooth\"\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 5\n  },\n  \"origin\": {\n    \"region\": \"China\",\n    \"season\": \"winter-spring\",\n    \"terroir_notes\": \"Grows well in temperate climates; prized for fragrant peel and edible pulp.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:meyer-lemon",
                "expected": "ogs:core:ingredient:meyer-lemon",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "fruit",
                "expected": "fruit",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2501,
            "output_tokens": 289,
            "latency_ms": 3290.0,
            "usd": 0.001463,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2501.0,
        "output_tokens_mean": 305.0,
        "latency_ms_mean": 3354.233,
        "usd_sum": 0.004466
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-010",
      "category": "structured_output",
      "capability": "schema_valid_ingredient",
      "difficulty": "hard",
      "cuisine_region": "mexican",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.ingredient",
        "vocab.ingredient_categories",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "6d4d6d3cc23f3283",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS ingredient schema (`schema/ingredient.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/ingredient.schema.json\",\n  \"title\": \"OGS Ingredient\",\n  \"description\": \"An ingredient entity with sensory profile, category, and provenance metadata.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"category\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\",\n      \"description\": \"OGS identifier in the format ogs:<namespace>:ingredient:<id>\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\",\n      \"description\": \"OGS specification version (0.1.x or 0.2.x)\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\",\n      \"description\": \"Authoritative / endonym language of this entity's human-readable fields.\"\n    
},\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      },\n      \"description\": \"Advisory list of languages covered by this entity's LString fields.\"\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"category\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"protein\",\n        \"vegetable\",\n        \"fruit\",\n        \"grain\",\n        \"dairy\",\n        \"fat\",\n        \"herb\",\n        \"spice\",\n        \"fungus\",\n        \"legume\",\n        \"nut\",\n        \"seed\",\n        \"sweetener\",\n        \"acid\",\n        \"salt\",\n        \"other\"\n      ]\n    },\n    \"subcategory\": {\n      \"type\": \"string\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"origin\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"region\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"season\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"terroir_notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Ingredient categories\n\nprotein, vegetable, fruit, grain, dairy, fat, herb, spice, fungus, legume, nut, seed, sweetener, acid, salt, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic 
tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document that describes 'wild huitlacoche' as an OGS ingredient. It MUST validate against the OGS ingredient schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"ingredient_name\": \"wild huitlacoche\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"huitlacoche\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/ingredient.schema.json",
        "required_fields": {
          "id": "ogs:core:ingredient:huitlacoche",
          "ogs_version": "0.1.0",
          "category": "fungus"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:huitlacoche\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"wild huitlacoche\"\n  },\n  \"category\": \"fungus\",\n  \"description\": {\n    \"en\": \"Wild huitlacoche, also known as corn smut, is an edible fungus that grows on ears of corn, valued for its unique earthy and umami flavor.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 1,\n      \"sour\": 1,\n      \"salty\": 1,\n      \"bitter\": 2,\n      \"umami\": 8\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"earth\",\n        \"group\": \"mushroom\",\n        \"item\": \"earthy\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"savory\",\n        \"group\": \"umami\",\n        \"item\": \"fungal\",\n        \"intensity\": 6\n      }\n    ],\n    \"texture\": [\n      \"soft\",\n      \"slightly chewy\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"Mexico\"\n    },\n    \"season\": {\n      \"en\": \"summer to early fall\"\n    }\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:huitlacoche",
                "expected": "ogs:core:ingredient:huitlacoche",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "fungus",
                "expected": "fungus",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2503,
            "output_tokens": 308,
            "latency_ms": 3340.1,
            "usd": 0.001494,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:huitlacoche\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"wild huitlacoche\"\n  },\n  \"category\": \"fungus\",\n  \"description\": {\n    \"en\": \"Wild huitlacoche, also known as corn smut, is a fungal delicacy that grows on ears of corn. It has a unique earthy and smoky flavor with sweet and umami notes.\"\n  },\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 1,\n      \"salty\": 1,\n      \"bitter\": 2,\n      \"umami\": 6\n    },\n    \"structural\": {\n      \"body\": 4,\n      \"acidity_perception\": 1,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 0,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"earth\",\n        \"group\": \"mushroom\",\n        \"item\": \"earthy\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"savory\",\n        \"group\": \"smoke\",\n        \"item\": \"smoky\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"sweet\",\n        \"item\": \"sweet corn\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"soft\",\n      \"slightly chewy\"\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 6\n  },\n  \"origin\": {\n    \"region\": {\n      \"en\": \"Mexico\"\n    },\n    \"season\": {\n      \"en\": \"Summer\"\n    },\n    \"terroir_notes\": {\n      \"en\": \"Grows parasitically on corn plants in wild settings, contributing to its unique flavor.\"\n    }\n  },\n  \"notes\": {\n    \"en\": \"A traditional delicacy in Mexican cuisine, prized for its umami complexity and distinct flavor.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:huitlacoche",
                "expected": "ogs:core:ingredient:huitlacoche",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "fungus",
                "expected": "fungus",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2503,
            "output_tokens": 469,
            "latency_ms": 4508.6,
            "usd": 0.001752,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:ingredient:huitlacoche\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"wild huitlacoche\",\n  \"category\": \"fungus\",\n  \"description\": \"Wild huitlacoche, also known as corn smut, is a culinary fungus that grows on ears of corn. It has an earthy, smoky, and slightly sweet flavor profile, popular in Mexican cuisine.\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 0.5,\n      \"salty\": 0.2,\n      \"bitter\": 1,\n      \"umami\": 5\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"earth\",\n        \"group\": \"mushroom\",\n        \"item\": \"earthy\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"smoke\",\n        \"group\": \"smoky\",\n        \"item\": \"smoky\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"sweet\",\n        \"item\": \"sweet\",\n        \"intensity\": 2\n      }\n    ],\n    \"texture\": [\n      \"soft\",\n      \"slightly chewy\"\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 6\n  },\n  \"origin\": {\n    \"region\": \"Mexico\",\n    \"season\": \"summer to autumn\",\n    \"terroir_notes\": \"Grows naturally on corn plants, benefits from warm climates and native corn varieties.\"\n  },\n  \"notes\": \"Highly regarded as a delicacy in Mexican cuisine, often used in quesadillas, soups, and tamales.\"\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'smoke' is not one of ['fruit', 'floral', 'herbal', 'spice', 'earth', 'wood', 'dairy', 'savory', 'confection', 'vegetal', 'marine']"
            ],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:ingredient:huitlacoche",
                "expected": "ogs:core:ingredient:huitlacoche",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "category": {
                "predicted": "fungus",
                "expected": "fungus",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2503,
            "output_tokens": 378,
            "latency_ms": 3782.6,
            "usd": 0.001606,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2503.0,
        "output_tokens_mean": 385.0,
        "latency_ms_mean": 3877.1,
        "usd_sum": 0.004852
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-011",
      "category": "structured_output",
      "capability": "schema_valid_beverage",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.beverage",
        "vocab.beverage_types",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "6615664480900680",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS beverage schema (`schema/beverage.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/beverage.schema.json\",\n  \"title\": \"OGS Beverage\",\n  \"description\": \"A beverage entity with type, structural attributes, sensory profile, and provenance.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"type\",\n    \"sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"type\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"wine\",\n        \"beer\",\n        \"spirit\",\n        \"cider\",\n        \"sake\",\n        \"other\"\n      ]\n    },\n    \"subtype\": {\n      \"type\": \"string\"\n    },\n    \"grape_varieties\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"string\"\n      }\n    },\n    \"vintage\": {\n      \"type\": \"integer\"\n    },\n    \"region\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"appellation\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"producer\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"abv\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"residual_sugar\": {\n      \"type\": \"number\",\n      \"minimum\": 0\n    },\n    \"serve_temperature_c\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"min\": {\n          \"type\": \"number\"\n        },\n        \"max\": {\n          \"type\": \"number\"\n        }\n      },\n      \"additionalProperties\": false,\n      \"description\": \"Recommended serving temperature range in degrees Celsius (SI canonical).\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n  
        \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is 
rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document describing 'Barolo' as an OGS beverage. It MUST validate against the OGS beverage schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"beverage_name\": \"Barolo\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"barolo-docg\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/beverage.schema.json",
        "required_fields": {
          "id": "ogs:core:beverage:barolo-docg",
          "ogs_version": "0.1.0",
          "type": "wine"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:barolo-docg\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Barolo\",\n  \"type\": \"wine\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 0.5,\n      \"sour\": 6,\n      \"salty\": 0,\n      \"bitter\": 5,\n      \"umami\": 2\n    },\n    \"structural\": {\n      \"body\": 8,\n      \"acidity_perception\": 7,\n      \"tannin\": 8,\n      \"astringency\": 7,\n      \"alcohol_heat\": 6,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"berry\",\n        \"item\": \"cherry\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried fruit\",\n        \"item\": \"prune\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"flower\",\n        \"item\": \"rose\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"truffle\",\n        \"item\": \"truffle\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"aromatic spice\",\n        \"item\": \"licorice\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"oak\",\n        \"item\": \"oak\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\"velvety\", \"full-bodied\"],\n    \"finish_length\": 8,\n    \"complexity\": 9\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:barolo-docg",
                "expected": "ogs:core:beverage:barolo-docg",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "wine",
                "expected": "wine",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2507,
            "output_tokens": 413,
            "latency_ms": 4119.1,
            "usd": 0.001664,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:barolo-docg\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Barolo\",\n  \"type\": \"wine\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 0.5,\n      \"sour\": 6,\n      \"salty\": 0,\n      \"bitter\": 4,\n      \"umami\": 3\n    },\n    \"structural\": {\n      \"body\": 7,\n      \"acidity_perception\": 6,\n      \"tannin\": 8,\n      \"astringency\": 7,\n      \"alcohol_heat\": 6,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"berry\",\n        \"item\": \"cherry\",\n        \"qualifier\": \"fresh\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried fruit\",\n        \"item\": \"fig\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"flower\",\n        \"item\": \"rose\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"herb\",\n        \"item\": \"mint\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"spice\",\n        \"item\": \"clove\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"soil\",\n        \"item\": \"truffle\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"wood\",\n        \"item\": \"cedar\",\n        \"intensity\": 5\n      }\n    ],\n    \"texture\": [\n      \"full-bodied\",\n      \"velvety\",\n      \"complex\"\n    ],\n    \"finish_length\": 8,\n    \"complexity\": 8\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:barolo-docg",
                "expected": "ogs:core:beverage:barolo-docg",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "wine",
                "expected": "wine",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2507,
            "output_tokens": 462,
            "latency_ms": 4928.4,
            "usd": 0.001742,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:barolo-docg\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Barolo\",\n  \"type\": \"wine\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 1,\n      \"sour\": 6,\n      \"salty\": 0,\n      \"bitter\": 5,\n      \"umami\": 1\n    },\n    \"structural\": {\n      \"body\": 8,\n      \"acidity_perception\": 7,\n      \"tannin\": 8,\n      \"astringency\": 7,\n      \"alcohol_heat\": 6,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"red\",\n        \"item\": \"cherry\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried\",\n        \"item\": \"plum\",\n        \"intensity\": 5,\n        \"qualifier\": \"dried\"\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"rose\",\n        \"item\": \"rose\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"truffle\",\n        \"item\": \"truffle\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"anise\",\n        \"item\": \"anise\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"oak\",\n        \"item\": \"oak\",\n        \"intensity\": 5\n      }\n    ],\n    \"texture\": [\n      \"velvety\",\n      \"rich\",\n      \"full-bodied\"\n    ],\n    \"finish_length\": 7,\n    \"complexity\": 8\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:barolo-docg",
                "expected": "ogs:core:beverage:barolo-docg",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "wine",
                "expected": "wine",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2507,
            "output_tokens": 426,
            "latency_ms": 4095.3,
            "usd": 0.001684,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2507.0,
        "output_tokens_mean": 433.667,
        "latency_ms_mean": 4380.933,
        "usd_sum": 0.00509
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-012",
      "category": "structured_output",
      "capability": "schema_valid_beverage",
      "difficulty": "hard",
      "cuisine_region": "scottish",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.beverage",
        "vocab.beverage_types",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "bd330891a3e68b59",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS beverage schema (`schema/beverage.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/beverage.schema.json\",\n  \"title\": \"OGS Beverage\",\n  \"description\": \"A beverage entity with type, structural attributes, sensory profile, and provenance.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"type\",\n    \"sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"type\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"wine\",\n        \"beer\",\n        \"spirit\",\n        \"cider\",\n        \"sake\",\n        \"other\"\n      ]\n    },\n    \"subtype\": {\n      \"type\": \"string\"\n    },\n    \"grape_varieties\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"string\"\n      }\n    },\n    \"vintage\": {\n      \"type\": \"integer\"\n    },\n    \"region\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"appellation\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"producer\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"abv\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"residual_sugar\": {\n      \"type\": \"number\",\n      \"minimum\": 0\n    },\n    \"serve_temperature_c\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"min\": {\n          \"type\": \"number\"\n        },\n        \"max\": {\n          \"type\": \"number\"\n        }\n      },\n      \"additionalProperties\": false,\n      \"description\": \"Recommended serving temperature range in degrees Celsius (SI canonical).\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n  
        \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is 
rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document describing 'Islay single malt' as an OGS beverage. It MUST validate against the OGS beverage schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"beverage_name\": \"Islay single malt\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"ardbeg-10-islay\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/beverage.schema.json",
        "required_fields": {
          "id": "ogs:core:beverage:ardbeg-10-islay",
          "ogs_version": "0.1.0",
          "type": "spirit"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:ardbeg-10-islay\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Islay single malt\",\n  \"type\": \"spirit\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 1,\n      \"salty\": 1,\n      \"bitter\": 4,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 2,\n      \"tannin\": 0,\n      \"astringency\": 2,\n      \"alcohol_heat\": 7,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"smoke\",\n        \"group\": \"peat\",\n        \"item\": \"peat smoke\",\n        \"intensity\": 8\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"oak\",\n        \"item\": \"oak\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"pepper\",\n        \"item\": \"black pepper\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"marine\",\n        \"group\": \"seaweed\",\n        \"item\": \"seaweed\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"peat\",\n        \"item\": \"peat\",\n        \"intensity\": 7\n      }\n    ],\n    \"texture\": [\n      \"oily\",\n      \"dense\"\n    ],\n    \"finish_length\": 7,\n    \"complexity\": 8\n  }\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'smoke' is not one of ['fruit', 'floral', 'herbal', 'spice', 'earth', 'wood', 'dairy', 'savory', 'confection', 'vegetal', 'marine']"
            ],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:ardbeg-10-islay",
                "expected": "ogs:core:beverage:ardbeg-10-islay",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "spirit",
                "expected": "spirit",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2513,
            "output_tokens": 381,
            "latency_ms": 4523.3,
            "usd": 0.001615,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:ardbeg-10-islay\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Islay single malt\",\n  \"type\": \"spirit\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 0,\n      \"salty\": 1,\n      \"bitter\": 3,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 0,\n      \"tannin\": 1,\n      \"astringency\": 0,\n      \"alcohol_heat\": 7,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"wood\",\n        \"group\": \"oak\",\n        \"item\": \"peat smoke\",\n        \"intensity\": 8\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon zest\",\n        \"intensity\": 4,\n        \"qualifier\": \"zest\"\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"heather\",\n        \"item\": \"heather\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"pepper\",\n        \"item\": \"black pepper\",\n        \"intensity\": 2\n      },\n      {\n        \"family\": \"savory\",\n        \"group\": \"marine\",\n        \"item\": \"seaweed\",\n        \"intensity\": 5\n      }\n    ],\n    \"finish_length\": 7,\n    \"complexity\": 8\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:ardbeg-10-islay",
                "expected": "ogs:core:beverage:ardbeg-10-islay",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "spirit",
                "expected": "spirit",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2513,
            "output_tokens": 380,
            "latency_ms": 4123.4,
            "usd": 0.001613,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:ardbeg-10-islay\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Islay single malt\",\n  \"type\": \"spirit\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 1,\n      \"salty\": 0.5,\n      \"bitter\": 4,\n      \"umami\": 0.5\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 2,\n      \"tannin\": 1,\n      \"astringency\": 1,\n      \"alcohol_heat\": 6,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"earth\",\n        \"group\": \"smoke\",\n        \"item\": \"peat smoke\",\n        \"intensity\": 8\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"oak\",\n        \"item\": \"oak\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"savory\",\n        \"group\": \"seaweed\",\n        \"item\": \"seaweed\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"pepper\",\n        \"item\": \"black pepper\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lemon zest\",\n        \"qualifier\": \"zest\",\n        \"intensity\": 2\n      }\n    ],\n    \"texture\": [\n      \"oily\",\n      \"rich\"\n    ],\n    \"finish_length\": 7,\n    \"complexity\": 7\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:ardbeg-10-islay",
                "expected": "ogs:core:beverage:ardbeg-10-islay",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "spirit",
                "expected": "spirit",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2513,
            "output_tokens": 399,
            "latency_ms": 4523.6,
            "usd": 0.001644,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2513.0,
        "output_tokens_mean": 386.667,
        "latency_ms_mean": 4390.1,
        "usd_sum": 0.004872
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-013",
      "category": "structured_output",
      "capability": "schema_valid_beverage",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.beverage",
        "vocab.beverage_types",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "872d23f39307a374",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS beverage schema (`schema/beverage.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/beverage.schema.json\",\n  \"title\": \"OGS Beverage\",\n  \"description\": \"A beverage entity with type, structural attributes, sensory profile, and provenance.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"type\",\n    \"sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"type\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"wine\",\n        \"beer\",\n        \"spirit\",\n        \"cider\",\n        \"sake\",\n        \"other\"\n      ]\n    },\n    \"subtype\": {\n      \"type\": \"string\"\n    },\n    \"grape_varieties\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"string\"\n      }\n    },\n    \"vintage\": {\n      \"type\": \"integer\"\n    },\n    \"region\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"appellation\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"producer\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"abv\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"residual_sugar\": {\n      \"type\": \"number\",\n      \"minimum\": 0\n    },\n    \"serve_temperature_c\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"min\": {\n          \"type\": \"number\"\n        },\n        \"max\": {\n          \"type\": \"number\"\n        }\n      },\n      \"additionalProperties\": false,\n      \"description\": \"Recommended serving temperature range in degrees Celsius (SI canonical).\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n  
        \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is 
rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document describing 'junmai ginjo sake' as an OGS beverage. It MUST validate against the OGS beverage schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"beverage_name\": \"junmai ginjo sake\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"junmai-ginjo-generic\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/beverage.schema.json",
        "required_fields": {
          "id": "ogs:core:beverage:junmai-ginjo-generic",
          "ogs_version": "0.1.0",
          "type": "sake"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:junmai-ginjo-generic\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"junmai ginjo sake\",\n  \"type\": \"sake\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 4,\n      \"salty\": 0.5,\n      \"bitter\": 1,\n      \"umami\": 3\n    },\n    \"structural\": {\n      \"body\": 4,\n      \"acidity_perception\": 5,\n      \"tannin\": 0,\n      \"astringency\": 1,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruity\",\n        \"group\": \"fruit\",\n        \"item\": \"apple\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"floral\",\n        \"item\": \"rice flower\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"earth\",\n        \"item\": \"yeast\",\n        \"intensity\": 2\n      }\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'fruity' is not one of ['fruit', 'floral', 'herbal', 'spice', 'earth', 'wood', 'dairy', 'savory', 'confection', 'vegetal', 'marine']"
            ],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:junmai-ginjo-generic",
                "expected": "ogs:core:beverage:junmai-ginjo-generic",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "sake",
                "expected": "sake",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2516,
            "output_tokens": 301,
            "latency_ms": 3329.4,
            "usd": 0.001488,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:junmai-ginjo-generic\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"junmai ginjo sake\",\n  \"type\": \"sake\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 4,\n      \"salty\": 0.5,\n      \"bitter\": 0.5,\n      \"umami\": 7\n    },\n    \"structural\": {\n      \"body\": 4,\n      \"acidity_perception\": 5,\n      \"alcohol_heat\": 4,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"fruit\",\n        \"item\": \"melon\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"flower\",\n        \"item\": \"rice-flower\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\n      \"smooth\",\n      \"silky\"\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:junmai-ginjo-generic",
                "expected": "ogs:core:beverage:junmai-ginjo-generic",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "sake",
                "expected": "sake",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2516,
            "output_tokens": 267,
            "latency_ms": 3239.2,
            "usd": 0.001434,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:junmai-ginjo-generic\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"junmai ginjo sake\",\n  \"type\": \"sake\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 4,\n      \"salty\": 1,\n      \"bitter\": 0.5,\n      \"umami\": 4\n    },\n    \"structural\": {\n      \"body\": 3,\n      \"acidity_perception\": 4,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"general\",\n        \"item\": \"apple\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"general\",\n        \"item\": \"rice flower\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"confection\",\n        \"group\": \"general\",\n        \"item\": \"sweetness\",\n        \"intensity\": 2\n      }\n    ],\n    \"texture\": [\"smooth\", \"silky\"],\n    \"finish_length\": 4,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:junmai-ginjo-generic",
                "expected": "ogs:core:beverage:junmai-ginjo-generic",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "sake",
                "expected": "sake",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2516,
            "output_tokens": 311,
            "latency_ms": 4072.4,
            "usd": 0.001504,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2516.0,
        "output_tokens_mean": 293.0,
        "latency_ms_mean": 3547.0,
        "usd_sum": 0.004426
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-014",
      "category": "structured_output",
      "capability": "schema_valid_beverage",
      "difficulty": "easy",
      "cuisine_region": "german",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.beverage",
        "vocab.beverage_types",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "3fe3fe74f8d80260",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS beverage schema (`schema/beverage.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/beverage.schema.json\",\n  \"title\": \"OGS Beverage\",\n  \"description\": \"A beverage entity with type, structural attributes, sensory profile, and provenance.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"type\",\n    \"sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"type\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"wine\",\n        \"beer\",\n        \"spirit\",\n        \"cider\",\n        \"sake\",\n        \"other\"\n      ]\n    },\n    \"subtype\": {\n      \"type\": \"string\"\n    },\n    \"grape_varieties\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"type\": \"string\"\n      }\n    },\n    \"vintage\": {\n      \"type\": \"integer\"\n    },\n    \"region\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"appellation\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"producer\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"abv\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"residual_sugar\": {\n      \"type\": \"number\",\n      \"minimum\": 0\n    },\n    \"serve_temperature_c\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"min\": {\n          \"type\": \"number\"\n        },\n        \"max\": {\n          \"type\": \"number\"\n        }\n      },\n      \"additionalProperties\": false,\n      \"description\": \"Recommended serving temperature range in degrees Celsius (SI canonical).\"\n    },\n    \"sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n  
        \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        
\"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    
\"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Beverage types\n\nwine, beer, spirit, cider, sake, other\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is 
rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce a JSON document describing 'Oktoberfest Märzen lager' as an OGS beverage. It MUST validate against the OGS beverage schema. Respond with ONLY the JSON object.\n\nINPUT:\n{\n  \"beverage_name\": \"Oktoberfest Märzen lager\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"oktoberfest-marzen\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/beverage.schema.json",
        "required_fields": {
          "id": "ogs:core:beverage:oktoberfest-marzen",
          "ogs_version": "0.1.0",
          "type": "beer"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:oktoberfest-marzen\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Oktoberfest Märzen lager\",\n  \"type\": \"beer\",\n  \"subtype\": \"lager\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 5,\n      \"sour\": 1,\n      \"salty\": 0,\n      \"bitter\": 3,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 2,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 6\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"dried fruit\",\n        \"item\": \"dried apricot\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"toast\",\n        \"item\": \"toasted bread\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"malt\",\n        \"item\": \"grainy malt\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"herbal spice\",\n        \"item\": \"noble hop\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"smooth\",\n      \"medium carbonation\"\n    ],\n    \"finish_length\": 5,\n    \"complexity\": 6\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:oktoberfest-marzen",
                "expected": "ogs:core:beverage:oktoberfest-marzen",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "beer",
                "expected": "beer",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2516,
            "output_tokens": 364,
            "latency_ms": 4250.5,
            "usd": 0.001589,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:oktoberfest-marzen\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Oktoberfest Märzen lager\",\n  \"type\": \"beer\",\n  \"subtype\": \"lager\",\n  \"sensory_profile\": {\n    \"basic_tasters\": {\n      \"sweet\": 4.5,\n      \"sour\": 2,\n      \"salty\": 0,\n      \"bitter\": 3,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 3,\n      \"tannin\": 0,\n      \"astringency\": 1,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 5\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"stone fruit\",\n        \"item\": \"apricot\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"floral\",\n        \"group\": \"general\",\n        \"item\": \"floral\",\n        \"intensity\": 2\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"sweet spice\",\n        \"item\": \"clove\",\n        \"intensity\": 2\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"toasted\",\n        \"item\": \"toast\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"smooth\",\n      \"medium carbonation\"\n    ],\n    \"finish_length\": 4,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:oktoberfest-marzen",
                "expected": "ogs:core:beverage:oktoberfest-marzen",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "beer",
                "expected": "beer",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2516,
            "output_tokens": 360,
            "latency_ms": 3743.9,
            "usd": 0.001582,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:beverage:oktoberfest-marzen\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"Oktoberfest Märzen lager\",\n  \"type\": \"beer\",\n  \"subtype\": \"märzen lager\",\n  \"sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 4,\n      \"sour\": 2,\n      \"salty\": 0,\n      \"bitter\": 3,\n      \"umami\": 0\n    },\n    \"structural\": {\n      \"body\": 6,\n      \"acidity_perception\": 3,\n      \"tannin\": 0,\n      \"astringency\": 1,\n      \"alcohol_heat\": 3,\n      \"effervescence\": 5\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"fruit\",\n        \"item\": \"orange peel\",\n        \"intensity\": 3\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"spice\",\n        \"item\": \"clove\",\n        \"intensity\": 2\n      },\n      {\n        \"family\": \"wood\",\n        \"group\": \"malt\",\n        \"item\": \"toasted malt\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"fruit\",\n        \"item\": \"dried apricot\",\n        \"intensity\": 3\n      }\n    ],\n    \"texture\": [\n      \"smooth\",\n      \"medium\",\n      \"carbonated\"\n    ],\n    \"finish_length\": 6,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:beverage:oktoberfest-marzen",
                "expected": "ogs:core:beverage:oktoberfest-marzen",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "type": {
                "predicted": "beer",
                "expected": "beer",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 2516,
            "output_tokens": 365,
            "latency_ms": 3809.3,
            "usd": 0.00159,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 2516.0,
        "output_tokens_mean": 363.0,
        "latency_ms_mean": 3934.567,
        "usd_sum": 0.004761
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-015",
      "category": "structured_output",
      "capability": "schema_valid_pairing",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.pairing",
        "vocab.match_types",
        "vocab.explanation_codes",
        "vocab.grade_scale",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "f0c01fa20f35eb51",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS pairing schema (`schema/pairing.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/pairing.schema.json\",\n  \"title\": \"OGS Pairing\",\n  \"description\": \"A scored, explained pairing between a dish and a beverage.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"dish_ref\",\n    \"beverage_ref\",\n    \"match_types\",\n    \"overall_score\",\n    \"grade\",\n    \"explanation_codes\",\n    \"confidence\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:pairing:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n     
 \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"dish_ref\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:dish:[a-z0-9-]+$\"\n    },\n    \"beverage_ref\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"match_types\": {\n      \"type\": \"array\",\n      \"minItems\": 1,\n      \"items\": {\n        \"$ref\": \"#/$defs/match_type_entry\"\n      }\n    },\n    \"overall_score\": {\n      \"type\": \"integer\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"grade\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"A+\",\n        \"A\",\n        \"B+\",\n        \"B\",\n        \"C+\",\n        \"C\",\n        \"D\",\n        \"F\"\n      ]\n    },\n    \"explanation_codes\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/explanation_code_entry\"\n      }\n    },\n    \"risks\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/risk_entry\"\n      }\n    },\n    \"confidence\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 1\n    },\n    \"evidence\": {\n      \"$ref\": \"#/$defs/evidence\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"match_type_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"type\",\n 
       \"score\"\n      ],\n      \"properties\": {\n        \"type\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"harmony\",\n            \"contrast\",\n            \"bridge\",\n            \"intensity_alignment\",\n            \"structural_balance\",\n            \"refresh\"\n          ]\n        },\n        \"score\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 100\n        },\n        \"description\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"primary\": {\n          \"type\": \"boolean\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"explanation_code_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"code\"\n      ],\n      \"properties\": {\n        \"code\": {\n          \"type\": \"string\"\n        },\n        \"weight\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 1\n        },\n        \"detail\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"risk_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"code\",\n        \"severity\"\n      ],\n      \"properties\": {\n        \"code\": {\n          \"type\": \"string\"\n        },\n        \"severity\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"low\",\n            \"medium\",\n            \"high\"\n          ]\n        },\n        \"description\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"mitigation\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"evidence\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"method\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"expert_panel\",\n            \"instrumental\",\n            \"computed\",\n            \"estimated\"\n  
        ]\n        },\n        \"sources\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n### OGS Explanation Codes\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a 
crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall flavor 
intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. 
`core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce an OGS pairing JSON document for the described match. It MUST validate against the OGS pairing schema. Respond with ONLY the JSON.\n\nINPUT:\n{\n  \"pairing_slug\": \"crottin-sancerre\",\n  \"dish_ref\": \"ogs:core:dish:crottin-de-chavignol\",\n  \"beverage_ref\": \"ogs:core:beverage:sancerre-blanc\",\n  \"intent\": \"Classic Loire regional pairing: Sancerre's acidity and citrus meet fresh goat cheese's tang.\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/pairing.schema.json",
        "required_fields": {
          "id": "ogs:core:pairing:crottin-sancerre",
          "ogs_version": "0.1.0",
          "dish_ref": "ogs:core:dish:crottin-de-chavignol",
          "beverage_ref": "ogs:core:beverage:sancerre-blanc"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:crottin-sancerre\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Crottin de Chavignol with Sancerre Blanc\"\n  },\n  \"dish_ref\": \"ogs:core:dish:crottin-de-chavignol\",\n  \"beverage_ref\": \"ogs:core:beverage:sancerre-blanc\",\n  \"match_types\": [\n    {\n      \"type\": \"harmony\",\n      \"score\": 88,\n      \"description\": {\n        \"en\": \"Regional affinity between Loire Valley cheese and wine enhances harmony.\"\n      },\n      \"primary\": true\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 90,\n      \"description\": {\n        \"en\": \"Sancerre's bright acidity balances the fresh tang and texture of the goat cheese.\"\n      }\n    },\n    {\n      \"type\": \"refresh\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"Acidity in Sancerre refreshes the palate between bites of creamy cheese.\"\n      }\n    },\n    {\n      \"type\": \"aromatic_bridge\",\n      \"score\": 75,\n      \"description\": {\n        \"en\": \"Shared citrus and light herbal aromatics create a subtle aromatic connection.\"\n      }\n    }\n  ],\n  \"overall_score\": 88,\n  \"grade\": \"B+\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"HARMONY_REGIONAL\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"Both dish and beverage originate from the Loire Valley region.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_ACIDITY_BALANCE\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"The wine's acidity cuts through the cheese's richness and complements its tangy flavor.\"\n      }\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.2,\n      \"detail\": {\n        \"en\": \"High acidity refreshes the palate between cheese bites.\"\n      }\n    },\n    {\n      \"code\": \"AROMA_SHARED_HERBAL\",\n      \"weight\": 0.2,\n      \"detail\": {\n        \"en\": \"Herbal and citrus aromatics are shared 
between the wine and cheese, linking them aromatically.\"\n      }\n    }\n  ],\n  \"confidence\": 0.9,\n  \"description\": {\n    \"en\": \"Classic Loire regional pairing: the crisp acidity and citrus notes of Sancerre Blanc beautifully complement the fresh tang and herbal notes of Crottin de Chavignol goat cheese.\"\n  }\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'aromatic_bridge' is not one of ['harmony', 'contrast', 'bridge', 'intensity_alignment', 'structural_balance', 'refresh']"
            ],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:crottin-sancerre",
                "expected": "ogs:core:pairing:crottin-sancerre",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:crottin-de-chavignol",
                "expected": "ogs:core:dish:crottin-de-chavignol",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:sancerre-blanc",
                "expected": "ogs:core:beverage:sancerre-blanc",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3382,
            "output_tokens": 579,
            "latency_ms": 6691.7,
            "usd": 0.002279,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:crottin-sancerre\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Crottin de Chavignol and Sancerre Blanc Pairing\"\n  },\n  \"dish_ref\": \"ogs:core:dish:crottin-de-chavignol\",\n  \"beverage_ref\": \"ogs:core:beverage:sancerre-blanc\",\n  \"match_types\": [\n    {\n      \"type\": \"harmony\",\n      \"score\": 90,\n      \"description\": {\n        \"en\": \"Regional affinity ties the Loire valley crottin cheese to the Sancerre wine, uniting terroir and tradition.\"\n      },\n      \"primary\": true\n    },\n    {\n      \"type\": \"contrast\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"Sancerre's crisp acidity contrasts with the creamy, tangy goat cheese, balancing richness with freshness.\"\n      }\n    },\n    {\n      \"type\": \"refresh\",\n      \"score\": 80,\n      \"description\": {\n        \"en\": \"High acidity in Sancerre refreshes the palate between bites of rich, fresh goat cheese.\"\n      }\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"Acidity balance between wine and cheese enhances the overall freshness and flavor integration.\"\n      }\n    }\n  ],\n  \"overall_score\": 88,\n  \"grade\": \"B+\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"HARMONY_REGIONAL\",\n      \"weight\": 0.4,\n      \"detail\": {\n        \"en\": \"Both dish and beverage originate from the Loire Valley, enhancing compatibility.\"\n      }\n    },\n    {\n      \"code\": \"CONTRAST_SWEET_ACID\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"The acidity in the wine contrasts the natural mild sweetness and tang of the cheese.\"\n      }\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.2,\n      \"detail\": {\n        \"en\": \"The wine's acidity refreshes the palate, cutting through the creamy texture of the cheese.\"\n      }\n    },\n    {\n      
\"code\": \"STRUCT_ACIDITY_BALANCE\",\n      \"weight\": 0.1,\n      \"detail\": {\n        \"en\": \"Balanced acidity between the wine and cheese supports a harmonious taste experience.\"\n      }\n    }\n  ],\n  \"confidence\": 0.95,\n  \"notes\": {\n    \"en\": \"A classic pairing highlighting balanced acidity and regional harmony, ideal for fresh crottin cheeses and crisp white wines like Sancerre.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:crottin-sancerre",
                "expected": "ogs:core:pairing:crottin-sancerre",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:crottin-de-chavignol",
                "expected": "ogs:core:dish:crottin-de-chavignol",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:sancerre-blanc",
                "expected": "ogs:core:beverage:sancerre-blanc",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3382,
            "output_tokens": 596,
            "latency_ms": 6037.5,
            "usd": 0.002306,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:crottin-sancerre\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Crottin de Chavignol and Sancerre Blanc\"\n  },\n  \"dish_ref\": \"ogs:core:dish:crottin-de-chavignol\",\n  \"beverage_ref\": \"ogs:core:beverage:sancerre-blanc\",\n  \"match_types\": [\n    {\n      \"type\": \"harmony\",\n      \"score\": 90,\n      \"description\": {\n        \"en\": \"Regional affinity between Loire Valley goat cheese and Sancerre Blanc wine.\"\n      },\n      \"primary\": true\n    },\n    {\n      \"type\": \"refresh\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"High acidity in Sancerre Blanc refreshes the palate and balances the tangy goat cheese.\"\n      }\n    },\n    {\n      \"type\": \"contrast\",\n      \"score\": 80,\n      \"description\": {\n        \"en\": \"Acidity of wine contrasts with the creamy, tangy character of goat cheese creating balance.\"\n      }\n    }\n  ],\n  \"overall_score\": 88,\n  \"grade\": \"B+\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"HARMONY_REGIONAL\",\n      \"weight\": 0.4,\n      \"detail\": {\n        \"en\": \"Both crottin de Chavignol cheese and Sancerre Blanc originate from the Loire Valley, reflecting regional compatibility.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_ACIDITY_BALANCE\",\n      \"weight\": 0.35,\n      \"detail\": {\n        \"en\": \"The acidity of Sancerre Blanc cuts through the creamy fat and complements the tanginess of goat cheese.\"\n      }\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.25,\n      \"detail\": {\n        \"en\": \"The crisp acidity in the wine refreshes the palate and balances the richness of the cheese.\"\n      }\n    }\n  ],\n  \"confidence\": 0.95,\n  \"notes\": {\n    \"en\": \"A classic Loire regional pairing where bright citrus and mineral notes in Sancerre Blanc highlight the fresh, tangy quality of crottin de Chavignol cheese.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:crottin-sancerre",
                "expected": "ogs:core:pairing:crottin-sancerre",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:crottin-de-chavignol",
                "expected": "ogs:core:dish:crottin-de-chavignol",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:sancerre-blanc",
                "expected": "ogs:core:beverage:sancerre-blanc",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3382,
            "output_tokens": 507,
            "latency_ms": 4883.0,
            "usd": 0.002164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 3382.0,
        "output_tokens_mean": 560.667,
        "latency_ms_mean": 5870.733,
        "usd_sum": 0.006749
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-016",
      "category": "structured_output",
      "capability": "schema_valid_dish",
      "difficulty": "hard",
      "cuisine_region": "japanese",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.dish",
        "vocab.component_roles",
        "vocab.cooking_methods",
        "vocab.courses",
        "vocab.service_temperatures",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "284b25ae8f0dc3ef",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS dish schema (`schema/dish.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/dish.schema.json\",\n  \"title\": \"OGS Dish\",\n  \"description\": \"A composed dish with components, cooking methods, and overall sensory profile.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"components\",\n    \"overall_sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:dish:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"cuisine\": {\n      \"type\": \"string\",\n      \"description\": \"Free-text cuisine name (v0.1) OR a cuisine entity reference matching ogs:<ns>:cuisine:<id> (v0.2+).\"\n    },\n    \"course\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"amuse\",\n        \"appetizer\",\n        \"soup\",\n        \"salad\",\n        \"fish\",\n        \"main\",\n        \"cheese\",\n        \"dessert\",\n        \"petit_four\"\n      ]\n    },\n    \"service_temperature\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"frozen\",\n        \"cold\",\n        \"cool\",\n        \"room\",\n        \"warm\",\n        \"hot\",\n        \"very_hot\"\n      ]\n    },\n    \"components\": {\n      \"type\": \"array\",\n      \"minItems\": 1,\n      \"items\": {\n        \"$ref\": \"#/$defs/component\"\n      }\n    },\n    \"overall_sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              
\"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"component\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"role\",\n        \"ingredient_ref\"\n      ],\n      \"properties\": {\n        \"role\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"main_protein\",\n            \"main_vegetable\",\n            \"starch\",\n            \"sauce\",\n            \"fat_component\",\n            \"acid_component\",\n            
\"aromatic_garnish\",\n            \"seasoning\",\n            \"textural_element\"\n          ]\n        },\n        \"ingredient_ref\": {\n          \"type\": \"string\",\n          \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\"\n        },\n        \"cooking_method\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"raw\",\n            \"seared\",\n            \"grilled\",\n            \"roasted\",\n            \"braised\",\n            \"poached\",\n            \"steamed\",\n            \"fried\",\n            \"confit\",\n            \"smoked\",\n            \"fermented\",\n            \"cured\",\n            \"pickled\",\n            \"dehydrated\",\n            \"sous_vide\",\n            \"blanched\",\n            \"sauteed\",\n            \"baked\",\n            \"charred\",\n            \"emulsified\"\n          ]\n        },\n        \"maillard_index\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 5\n        },\n        \"reduction_level\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 5\n        },\n        \"proportion\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"dominant\",\n            \"significant\",\n            \"accent\",\n            \"trace\"\n          ]\n        },\n        \"sensory_contribution\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"basic_tastes\": {\n              \"$ref\": \"#/$defs/basic_tastes\"\n            },\n            \"aromatics\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/$defs/aromatic_entry\"\n              }\n            },\n            \"texture\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"additionalProperties\": true\n        },\n        \"notes\": {\n          
\"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": 
\"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, 
fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Service temperatures\n\nfrozen, cold, cool, room, warm, hot, very_hot\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce an OGS dish JSON for tonkotsu ramen (pork-bone broth, wheat noodles, chashu, soft egg, nori, scallion). Respond with ONLY the JSON.\n\nINPUT:\n{\n  \"dish_name\": \"tonkotsu ramen\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"tonkotsu-ramen\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/dish.schema.json",
        "required_fields": {
          "id": "ogs:core:dish:tonkotsu-ramen",
          "ogs_version": "0.1.0",
          "course": "main"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:tonkotsu-ramen\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"tonkotsu ramen\",\n  \"course\": \"main\",\n  \"service_temperature\": \"hot\",\n  \"components\": [\n    {\n      \"role\": \"sauce\",\n      \"ingredient_ref\": \"ogs:core:ingredient:pork-bone-broth\",\n      \"cooking_method\": \"braised\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 8,\n          \"salty\": 6,\n          \"sweet\": 2,\n          \"bitter\": 1,\n          \"sour\": 0\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"meaty\",\n            \"item\": \"pork\",\n            \"intensity\": 7\n          },\n          {\n            \"family\": \"earth\",\n            \"group\": \"bone\",\n            \"item\": \"marrow\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\n          \"rich\",\n          \"silky\"\n        ]\n      }\n    },\n    {\n      \"role\": \"starch\",\n      \"ingredient_ref\": \"ogs:core:ingredient:wheat-noodles\",\n      \"cooking_method\": \"boiled\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"texture\": [\n          \"chewy\",\n          \"springy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:chashu-pork\",\n      \"cooking_method\": \"braised\",\n      \"maillard_index\": 3,\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 7,\n          \"salty\": 6,\n          \"sweet\": 3,\n          \"bitter\": 1,\n          \"sour\": 0\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"meaty\",\n            \"item\": \"braised pork\",\n            \"intensity\": 7\n          },\n          {\n     
       \"family\": \"spice\",\n            \"group\": \"seasoning\",\n            \"item\": \"soy sauce\",\n            \"intensity\": 4\n          }\n        ],\n        \"texture\": [\n          \"tender\",\n          \"fatty\"\n        ]\n      }\n    },\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:soft-boiled-egg\",\n      \"cooking_method\": \"poached\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 5,\n          \"salty\": 1,\n          \"sweet\": 1\n        },\n        \"texture\": [\n          \"creamy\",\n          \"soft\"\n        ],\n        \"aromatics\": [\n          {\n            \"family\": \"dairy\",\n            \"group\": \"egg\",\n            \"item\": \"yolk\",\n            \"intensity\": 3\n          }\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:nori-seaweed\",\n      \"cooking_method\": \"dried\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"marine\",\n            \"group\": \"seaweed\",\n            \"item\": \"nori\",\n            \"intensity\": 6\n          }\n        ],\n        \"basic_tastes\": {\n          \"umami\": 4,\n          \"salty\": 2\n        },\n        \"texture\": [\n          \"crisp\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:scallion\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"bitter\": 1,\n          \"sweet\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"herbal\",\n            \"group\": \"onion\",\n            \"item\": \"scallion\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\n 
         \"crisp\",\n          \"fresh\"\n        ]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"umami\": 8,\n      \"salty\": 6,\n      \"sweet\": 2,\n      \"bitter\": 1,\n      \"sour\": 0\n    },\n    \"structural\": {\n      \"body\": 8,\n      \"acidity_perception\": 2,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 0,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"savory\",\n        \"group\": \"meaty\",\n        \"item\": \"pork\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"marine\",\n        \"group\": \"seaweed\",\n        \"item\": \"nori\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"onion\",\n        \"item\": \"scallion\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"bone\",\n        \"item\": \"marrow\",\n        \"intensity\": 6\n      }\n    ],\n    \"texture\": [\n      \"rich\",\n      \"silky\",\n      \"chewy\",\n      \"tender\",\n      \"creamy\",\n      \"crisp\"\n    ],\n    \"finish_length\": 7,\n    \"complexity\": 8\n  }\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'dried' is not one of ['raw', 'seared', 'grilled', 'roasted', 'braised', 'poached', 'steamed', 'fried', 'confit', 'smoked', 'fermented', 'cured', 'pickled', 'dehydrated', 'sous_vide', 'blanched', 'sauteed', 'baked', 'charred', 'emulsified']"
            ],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:tonkotsu-ramen",
                "expected": "ogs:core:dish:tonkotsu-ramen",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "course": {
                "predicted": "main",
                "expected": "main",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3078,
            "output_tokens": 1318,
            "latency_ms": 14840.7,
            "usd": 0.00334,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:tonkotsu-ramen\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"tonkotsu ramen\",\n  \"components\": [\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:chashu\",\n      \"cooking_method\": \"braised\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 7,\n          \"salty\": 4,\n          \"sweet\": 2\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"meat\",\n            \"item\": \"pork\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\"tender\", \"fatty\"]\n      }\n    },\n    {\n      \"role\": \"sauce\",\n      \"ingredient_ref\": \"ogs:core:ingredient:tonkotsu-broth\",\n      \"cooking_method\": \"boiled\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 9,\n          \"salty\": 5,\n          \"sweet\": 1,\n          \"fatty\": 0\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"broth\",\n            \"item\": \"pork bone\",\n            \"intensity\": 9\n          },\n          {\n            \"family\": \"earth\",\n            \"group\": \"creamy\",\n            \"item\": \"bone marrow\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\"creamy\", \"silky\"]\n      }\n    },\n    {\n      \"role\": \"starch\",\n      \"ingredient_ref\": \"ogs:core:ingredient:wheat-noodles\",\n      \"cooking_method\": \"boiled\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"texture\": [\"chewy\", \"springy\"]\n      }\n    },\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:soft-boiled-egg\",\n      \"cooking_method\": \"boiled\",\n      \"proportion\": 
\"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 3,\n          \"sweet\": 1\n        },\n        \"texture\": [\"soft\", \"creamy\"]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:nori\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"marine\",\n            \"group\": \"seaweed\",\n            \"item\": \"nori\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\"thin\", \"crispy\"]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:scallion\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"vegetal\",\n            \"group\": \"onion-like\",\n            \"item\": \"scallion\",\n            \"intensity\": 4\n          }\n        ],\n        \"basic_tastes\": {\n          \"bitter\": 1,\n          \"sweet\": 1\n        },\n        \"texture\": [\"crisp\"]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"umami\": 8,\n      \"salty\": 5,\n      \"sweet\": 2,\n      \"bitter\": 1\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"savory\",\n        \"group\": \"broth\",\n        \"item\": \"pork bone\",\n        \"intensity\": 9\n      },\n      {\n        \"family\": \"earth\",\n        \"group\": \"creamy\",\n        \"item\": \"bone marrow\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"marine\",\n        \"group\": \"seaweed\",\n        \"item\": \"nori\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"vegetal\",\n        \"group\": \"onion-like\",\n        \"item\": \"scallion\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\"creamy\", 
\"chewy\", \"tender\", \"crispy\", \"soft\"],\n    \"finish_length\": 6,\n    \"complexity\": 7\n  },\n  \"course\": \"main\",\n  \"service_temperature\": \"hot\"\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'boiled' is not one of ['raw', 'seared', 'grilled', 'roasted', 'braised', 'poached', 'steamed', 'fried', 'confit', 'smoked', 'fermented', 'cured', 'pickled', 'dehydrated', 'sous_vide', 'blanched', 'sauteed', 'baked', 'charred', 'emulsified']"
            ],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:tonkotsu-ramen",
                "expected": "ogs:core:dish:tonkotsu-ramen",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "course": {
                "predicted": "main",
                "expected": "main",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3078,
            "output_tokens": 1054,
            "latency_ms": 12714.9,
            "usd": 0.002918,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:tonkotsu-ramen\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"tonkotsu ramen\",\n  \"components\": [\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:chashu\",\n      \"cooking_method\": \"braised\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 7,\n          \"salty\": 5,\n          \"sweet\": 3\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"meaty\",\n            \"item\": \"pork\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\n          \"tender\",\n          \"fatty\"\n        ]\n      }\n    },\n    {\n      \"role\": \"main_protein\",\n      \"ingredient_ref\": \"ogs:core:ingredient:soft-boiled-egg\",\n      \"cooking_method\": \"poached\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 3,\n          \"salty\": 2,\n          \"sweet\": 2,\n          \"bitter\": 1\n        },\n        \"aromatics\": [],\n        \"texture\": [\n          \"creamy\",\n          \"soft\"\n        ]\n      }\n    },\n    {\n      \"role\": \"starch\",\n      \"ingredient_ref\": \"ogs:core:ingredient:wheat-noodles\",\n      \"cooking_method\": \"boiled\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 2\n        },\n        \"texture\": [\n          \"chewy\",\n          \"springy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"sauce\",\n      \"ingredient_ref\": \"ogs:core:ingredient:tonkotsu-broth\",\n      \"cooking_method\": \"boiled\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 9,\n          \"salty\": 6,\n          \"sweet\": 3,\n          \"bitter\": 1\n        },\n 
       \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"broth\",\n            \"item\": \"pork-bone\",\n            \"intensity\": 8\n          },\n          {\n            \"family\": \"earth\",\n            \"group\": \"umami\",\n            \"item\": \"mushroom\",\n            \"intensity\": 3\n          }\n        ],\n        \"texture\": [\n          \"rich\",\n          \"silky\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:nori\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"umami\": 3,\n          \"salty\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"marine\",\n            \"group\": \"seaweed\",\n            \"item\": \"nori\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\n          \"crisp\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:scallion\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 1,\n          \"bitter\": 2\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"vegetal\",\n            \"group\": \"allium\",\n            \"item\": \"scallion\",\n            \"intensity\": 4\n          }\n        ],\n        \"texture\": [\n          \"crisp\",\n          \"fresh\"\n        ]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 0.5,\n      \"salty\": 5.5,\n      \"bitter\": 1.5,\n      \"umami\": 8\n    },\n    \"structural\": {\n      \"body\": 8,\n      \"acidity_perception\": 1,\n      \"tannin\": 0,\n      \"astringency\": 0.5,\n      \"alcohol_heat\": 0,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n 
     {\n        \"family\": \"savory\",\n        \"group\": \"broth\",\n        \"item\": \"pork-bone\",\n        \"intensity\": 8\n      },\n      {\n        \"family\": \"marine\",\n        \"group\": \"seaweed\",\n        \"item\": \"nori\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"vegetal\",\n        \"group\": \"allium\",\n        \"item\": \"scallion\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\n      \"rich\",\n      \"silky\",\n      \"chewy\",\n      \"tender\",\n      \"creamy\",\n      \"crisp\"\n    ],\n    \"finish_length\": 7,\n    \"complexity\": 7\n  }\n}",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "schema_valid": false,
            "schema_errors": [
              "'boiled' is not one of ['raw', 'seared', 'grilled', 'roasted', 'braised', 'poached', 'steamed', 'fried', 'confit', 'smoked', 'fermented', 'cured', 'pickled', 'dehydrated', 'sous_vide', 'blanched', 'sauteed', 'baked', 'charred', 'emulsified']"
            ],
            "consistent": false,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:tonkotsu-ramen",
                "expected": "ogs:core:dish:tonkotsu-ramen",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "course": {
                "predicted": null,
                "expected": "main",
                "match": false
              }
            }
          },
          "metrics": {
            "input_tokens": 3078,
            "output_tokens": 1186,
            "latency_ms": 13935.6,
            "usd": 0.003129,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 3078.0,
        "output_tokens_mean": 1186.0,
        "latency_ms_mean": 13830.4,
        "usd_sum": 0.009387
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-017",
      "category": "structured_output",
      "capability": "schema_valid_dish",
      "difficulty": "medium",
      "cuisine_region": "mexican",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.dish",
        "vocab.component_roles",
        "vocab.cooking_methods",
        "vocab.courses",
        "vocab.service_temperatures",
        "vocab.aromatic_families",
        "scales.basic_tastes",
        "scales.structural",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "534d8590759d6dc3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS dish schema (`schema/dish.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/dish.schema.json\",\n  \"title\": \"OGS Dish\",\n  \"description\": \"A composed dish with components, cooking methods, and overall sensory profile.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"components\",\n    \"overall_sensory_profile\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:dish:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": 
\"#/$defs/lstring\"\n    },\n    \"cuisine\": {\n      \"type\": \"string\",\n      \"description\": \"Free-text cuisine name (v0.1) OR a cuisine entity reference matching ogs:<ns>:cuisine:<id> (v0.2+).\"\n    },\n    \"course\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"amuse\",\n        \"appetizer\",\n        \"soup\",\n        \"salad\",\n        \"fish\",\n        \"main\",\n        \"cheese\",\n        \"dessert\",\n        \"petit_four\"\n      ]\n    },\n    \"service_temperature\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"frozen\",\n        \"cold\",\n        \"cool\",\n        \"room\",\n        \"warm\",\n        \"hot\",\n        \"very_hot\"\n      ]\n    },\n    \"components\": {\n      \"type\": \"array\",\n      \"minItems\": 1,\n      \"items\": {\n        \"$ref\": \"#/$defs/component\"\n      }\n    },\n    \"overall_sensory_profile\": {\n      \"$ref\": \"#/$defs/sensory_profile\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          \"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              
\"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"component\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"role\",\n        \"ingredient_ref\"\n      ],\n      \"properties\": {\n        \"role\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"main_protein\",\n            \"main_vegetable\",\n            \"starch\",\n            \"sauce\",\n            \"fat_component\",\n            \"acid_component\",\n            
\"aromatic_garnish\",\n            \"seasoning\",\n            \"textural_element\"\n          ]\n        },\n        \"ingredient_ref\": {\n          \"type\": \"string\",\n          \"pattern\": \"^ogs:[a-z0-9-]+:ingredient:[a-z0-9-]+$\"\n        },\n        \"cooking_method\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"raw\",\n            \"seared\",\n            \"grilled\",\n            \"roasted\",\n            \"braised\",\n            \"poached\",\n            \"steamed\",\n            \"fried\",\n            \"confit\",\n            \"smoked\",\n            \"fermented\",\n            \"cured\",\n            \"pickled\",\n            \"dehydrated\",\n            \"sous_vide\",\n            \"blanched\",\n            \"sauteed\",\n            \"baked\",\n            \"charred\",\n            \"emulsified\"\n          ]\n        },\n        \"maillard_index\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 5\n        },\n        \"reduction_level\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 5\n        },\n        \"proportion\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"dominant\",\n            \"significant\",\n            \"accent\",\n            \"trace\"\n          ]\n        },\n        \"sensory_contribution\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"basic_tastes\": {\n              \"$ref\": \"#/$defs/basic_tastes\"\n            },\n            \"aromatics\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"$ref\": \"#/$defs/aromatic_entry\"\n              }\n            },\n            \"texture\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"additionalProperties\": true\n        },\n        \"notes\": {\n          
\"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"sensory_profile\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"basic_tastes\": {\n          \"$ref\": \"#/$defs/basic_tastes\"\n        },\n        \"structural\": {\n          \"$ref\": \"#/$defs/structural\"\n        },\n        \"aromatics\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"$ref\": \"#/$defs/aromatic_entry\"\n          }\n        },\n        \"texture\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"finish_length\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        },\n        \"complexity\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 10\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"basic_tastes\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"sweet\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"sour\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"salty\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"bitter\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"umami\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"structural\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"body\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"acidity_perception\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"tannin\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"astringency\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        },\n        \"alcohol_heat\": {\n          \"$ref\": 
\"#/$defs/intensity_value\"\n        },\n        \"effervescence\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"intensity_value\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 10\n    },\n    \"aromatic_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"family\",\n        \"group\",\n        \"item\",\n        \"intensity\"\n      ],\n      \"properties\": {\n        \"family\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fruit\",\n            \"floral\",\n            \"herbal\",\n            \"spice\",\n            \"earth\",\n            \"wood\",\n            \"dairy\",\n            \"savory\",\n            \"confection\",\n            \"vegetal\",\n            \"marine\"\n          ]\n        },\n        \"group\": {\n          \"type\": \"string\"\n        },\n        \"item\": {\n          \"type\": \"string\"\n        },\n        \"qualifier\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"fresh\",\n            \"dried\",\n            \"cooked\",\n            \"candied\",\n            \"fermented\",\n            \"zest\",\n            \"overripe\"\n          ]\n        },\n        \"intensity\": {\n          \"$ref\": \"#/$defs/intensity_value\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, 
fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n### Dish courses\n\namuse, appetizer, soup, salad, fish, main, cheese, dessert, petit_four\n\n### Service temperatures\n\nfrozen, cold, cool, room, warm, hot, very_hot\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. `core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce an OGS dish JSON for pico de gallo (tomato, onion, cilantro, jalapeño, lime). JSON only.\n\nINPUT:\n{\n  \"dish_name\": \"pico de gallo\",\n  \"namespace\": \"core\",\n  \"id_slug\": \"pico-de-gallo\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/dish.schema.json",
        "required_fields": {
          "id": "ogs:core:dish:pico-de-gallo",
          "ogs_version": "0.1.0"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:pico-de-gallo\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"pico de gallo\",\n  \"course\": \"appetizer\",\n  \"service_temperature\": \"cool\",\n  \"components\": [\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:tomato\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 3,\n          \"sour\": 1,\n          \"salty\": 0,\n          \"bitter\": 0,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"berry\",\n            \"item\": \"tomato\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\n          \"juicy\",\n          \"soft\"\n        ]\n      }\n    },\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:onion\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 2,\n          \"sour\": 0.5,\n          \"salty\": 0,\n          \"bitter\": 0.2,\n          \"umami\": 0.5\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"savory\",\n            \"group\": \"allium\",\n            \"item\": \"onion\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\n          \"crisp\",\n          \"firm\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:cilantro\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 0,\n          \"sour\": 0,\n          \"salty\": 0,\n          \"bitter\": 0.5,\n          \"umami\": 0\n        },\n        \"aromatics\": [\n          {\n      
      \"family\": \"herbal\",\n            \"group\": \"leafy herb\",\n            \"item\": \"cilantro\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\n          \"leafy\",\n          \"fresh\"\n        ]\n      }\n    },\n    {\n      \"role\": \"seasoning\",\n      \"ingredient_ref\": \"ogs:core:ingredient:jalapeno\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 0,\n          \"sour\": 0,\n          \"salty\": 0,\n          \"bitter\": 0,\n          \"umami\": 0\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"spice\",\n            \"group\": \"pepper\",\n            \"item\": \"jalapeño\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\n          \"crisp\",\n          \"firm\"\n        ]\n      },\n      \"notes\": \"Adds a mild to medium heat and a characteristic pepper flavor\"\n    },\n    {\n      \"role\": \"acid_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:lime\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 1,\n          \"sour\": 8,\n          \"salty\": 0,\n          \"bitter\": 0,\n          \"umami\": 0\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"citrus\",\n            \"item\": \"lime\",\n            \"intensity\": 7,\n            \"qualifier\": \"fresh\"\n          }\n        ],\n        \"texture\": [\n          \"juicy\"\n        ]\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2,\n      \"sour\": 4,\n      \"salty\": 0,\n      \"bitter\": 0.5,\n      \"umami\": 1\n    },\n    \"structural\": {\n      \"body\": 3,\n      \"acidity_perception\": 6,\n      \"tannin\": 0,\n      \"astringency\": 1,\n  
    \"alcohol_heat\": 0,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"berry\",\n        \"item\": \"tomato\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"savory\",\n        \"group\": \"allium\",\n        \"item\": \"onion\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"leafy herb\",\n        \"item\": \"cilantro\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"pepper\",\n        \"item\": \"jalapeño\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lime\",\n        \"intensity\": 6,\n        \"qualifier\": \"fresh\"\n      }\n    ],\n    \"texture\": [\n      \"juicy\",\n      \"crisp\",\n      \"fresh\",\n      \"leafy\",\n      \"firm\"\n    ],\n    \"finish_length\": 3,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:pico-de-gallo",
                "expected": "ogs:core:dish:pico-de-gallo",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3063,
            "output_tokens": 1279,
            "latency_ms": 14920.0,
            "usd": 0.003272,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:pico-de-gallo\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"pico de gallo\",\n  \"components\": [\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:tomato\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 4,\n          \"sour\": 2,\n          \"salty\": 0.5,\n          \"bitter\": 0.5,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"berry\",\n            \"item\": \"tomato\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\n          \"juicy\",\n          \"firm\"\n        ]\n      }\n    },\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:onion\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 2,\n          \"sour\": 0,\n          \"salty\": 0,\n          \"bitter\": 0.5,\n          \"umami\": 0.5\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"spice\",\n            \"group\": \"allium\",\n            \"item\": \"onion\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\n          \"crisp\"\n        ]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:cilantro\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"bitter\": 1,\n          \"umami\": 0.5\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"herbal\",\n            \"group\": \"cilantro\",\n            \"item\": \"cilantro leaf\",\n            \"intensity\": 6\n          }\n        
],\n        \"texture\": [\n          \"leafy\"\n        ]\n      }\n    },\n    {\n      \"role\": \"seasoning\",\n      \"ingredient_ref\": \"ogs:core:ingredient:jalapeno\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"bitter\": 1,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"spice\",\n            \"group\": \"pepper\",\n            \"item\": \"jalapeño\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\n          \"crisp\"\n        ]\n      }\n    },\n    {\n      \"role\": \"acid_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:lime\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sour\": 7\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"citrus\",\n            \"item\": \"lime\",\n            \"qualifier\": \"zest\",\n            \"intensity\": 4\n          },\n          {\n            \"family\": \"fruit\",\n            \"group\": \"citrus\",\n            \"item\": \"lime juice\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": []\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 3,\n      \"sour\": 5,\n      \"salty\": 1,\n      \"bitter\": 1.5,\n      \"umami\": 2\n    },\n    \"structural\": {\n      \"body\": 3,\n      \"acidity_perception\": 5,\n      \"tannin\": 0,\n      \"astringency\": 0,\n      \"alcohol_heat\": 0,\n      \"effervescence\": 0\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"berry\",\n        \"item\": \"tomato\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"allium\",\n        \"item\": 
\"onion\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"cilantro\",\n        \"item\": \"cilantro leaf\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"pepper\",\n        \"item\": \"jalapeño\",\n        \"intensity\": 4\n      },\n      {\n        \"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"lime\",\n        \"intensity\": 4\n      }\n    ],\n    \"texture\": [\n      \"juicy\",\n      \"crisp\",\n      \"leafy\",\n      \"firm\"\n    ],\n    \"finish_length\": 3,\n    \"complexity\": 4\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:pico-de-gallo",
                "expected": "ogs:core:dish:pico-de-gallo",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3063,
            "output_tokens": 1176,
            "latency_ms": 14717.2,
            "usd": 0.003107,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:dish:pico-de-gallo\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": \"pico de gallo\",\n  \"course\": \"appetizer\",\n  \"service_temperature\": \"cool\",\n  \"components\": [\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:tomato\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"dominant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 3,\n          \"sour\": 2,\n          \"umami\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"tomato\",\n            \"item\": \"fresh tomato\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": [\"juicy\", \"firm\"]\n      }\n    },\n    {\n      \"role\": \"main_vegetable\",\n      \"ingredient_ref\": \"ogs:core:ingredient:onion\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"significant\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sweet\": 1.5,\n          \"bitter\": 0.5,\n          \"umami\": 0.5\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"spice\",\n            \"group\": \"allium\",\n            \"item\": \"raw onion\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\"crisp\"]\n      }\n    },\n    {\n      \"role\": \"aromatic_garnish\",\n      \"ingredient_ref\": \"ogs:core:ingredient:cilantro\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"aromatics\": [\n          {\n            \"family\": \"herbal\",\n            \"group\": \"cilantro\",\n            \"item\": \"fresh cilantro\",\n            \"intensity\": 7\n          }\n        ],\n        \"texture\": [\"leafy\"]\n      }\n    },\n    {\n      \"role\": \"seasoning\",\n      \"ingredient_ref\": \"ogs:core:ingredient:jalapeno\",\n      
\"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"bitter\": 1,\n          \"sour\": 1\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"spice\",\n            \"group\": \"pepper\",\n            \"item\": \"fresh jalapeño\",\n            \"intensity\": 5\n          }\n        ],\n        \"texture\": [\"crisp\"]\n      }\n    },\n    {\n      \"role\": \"acid_component\",\n      \"ingredient_ref\": \"ogs:core:ingredient:lime\",\n      \"cooking_method\": \"raw\",\n      \"proportion\": \"accent\",\n      \"sensory_contribution\": {\n        \"basic_tastes\": {\n          \"sour\": 7\n        },\n        \"aromatics\": [\n          {\n            \"family\": \"fruit\",\n            \"group\": \"citrus\",\n            \"item\": \"lime zest\",\n            \"qualifier\": \"zest\",\n            \"intensity\": 4\n          },\n          {\n            \"family\": \"fruit\",\n            \"group\": \"citrus\",\n            \"item\": \"fresh lime juice\",\n            \"intensity\": 6\n          }\n        ],\n        \"texture\": []\n      }\n    }\n  ],\n  \"overall_sensory_profile\": {\n    \"basic_tastes\": {\n      \"sweet\": 2.5,\n      \"sour\": 5.5,\n      \"salty\": 0,\n      \"bitter\": 1.5,\n      \"umami\": 0.8\n    },\n    \"aromatics\": [\n      {\n        \"family\": \"fruit\",\n        \"group\": \"tomato\",\n        \"item\": \"fresh tomato\",\n        \"intensity\": 6\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"allium\",\n        \"item\": \"raw onion\",\n        \"intensity\": 5\n      },\n      {\n        \"family\": \"herbal\",\n        \"group\": \"cilantro\",\n        \"item\": \"fresh cilantro\",\n        \"intensity\": 7\n      },\n      {\n        \"family\": \"spice\",\n        \"group\": \"pepper\",\n        \"item\": \"fresh jalapeño\",\n        \"intensity\": 5\n      },\n      {\n        
\"family\": \"fruit\",\n        \"group\": \"citrus\",\n        \"item\": \"fresh lime juice\",\n        \"intensity\": 6\n      }\n    ],\n    \"texture\": [\"juicy\", \"crisp\", \"leafy\", \"firm\"],\n    \"finish_length\": 2,\n    \"complexity\": 5\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:dish:pico-de-gallo",
                "expected": "ogs:core:dish:pico-de-gallo",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3063,
            "output_tokens": 1060,
            "latency_ms": 12344.6,
            "usd": 0.002921,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 3063.0,
        "output_tokens_mean": 1171.667,
        "latency_ms_mean": 13993.933,
        "usd_sum": 0.0093
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-018",
      "category": "structured_output",
      "capability": "sensory_profile_json",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "json_match",
      "context_needs": [
        "scales.basic_tastes",
        "scales.structural",
        "vocab.aromatic_families",
        "vocab.qualifiers"
      ],
      "prompt_template_hash": "1470a28b8e9ef028",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Aromatic qualifiers\n\nfresh, dried, cooked, candied, fermented, zest, overripe\n\n---\n\nProduce a JSON sensory profile for 24-month aged Comté cheese. Keys: basic_tastes, structural, aromatics. All intensities on 0-10. JSON only.\n\nINPUT:\n{\n  \"item\": \"24-month aged Comté\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "value": {
          "basic_tastes": {
            "sweet": 2.0,
            "sour": 2.0,
            "salty": 5.0,
            "bitter": 1.5,
            "umami": 8.0
          },
          "structural": {
            "body": 7.0,
            "acidity_perception": 3.0,
            "tannin": 0,
            "astringency": 0,
            "alcohol_heat": 0,
            "effervescence": 0
          }
        },
        "required_keys": [
          "basic_tastes.umami",
          "basic_tastes.salty",
          "structural.body"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 4.5,\n    \"sour\": 2.0,\n    \"salty\": 3.5,\n    \"bitter\": 1.5,\n    \"umami\": 6.0\n  },\n  \"structural\": {\n    \"body\": 7.0,\n    \"acidity_perception\": 2.5,\n    \"tannin\": 0,\n    \"astringency\": 0.5,\n    \"alcohol_heat\": 0,\n    \"effervescence\": 0\n  },\n  \"aromatics\": {\n    \"fruit\": \"dried\",\n    \"floral\": \"fresh\",\n    \"herbal\": \"fresh\",\n    \"spice\": \"dried\",\n    \"earth\": \"cooked\",\n    \"wood\": \"none\",\n    \"dairy\": \"cooked\",\n    \"savory\": \"fermented\",\n    \"confection\": \"candied\",\n    \"vegetal\": \"fresh\",\n    \"marine\": \"none\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.umami": {
                "predicted": 6.0,
                "expected": 8.0,
                "match": true
              },
              "basic_tastes.salty": {
                "predicted": 3.5,
                "expected": 5.0,
                "match": true
              },
              "structural.body": {
                "predicted": 7.0,
                "expected": 7.0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 454,
            "output_tokens": 225,
            "latency_ms": 2292.2,
            "usd": 0.000542,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 3.5,\n    \"sour\": 1.0,\n    \"salty\": 4.0,\n    \"bitter\": 1.5,\n    \"umami\": 6.5\n  },\n  \"structural\": {\n    \"body\": 7.0,\n    \"acidity_perception\": 1.0,\n    \"tannin\": 0.0,\n    \"astringency\": 0.5,\n    \"alcohol_heat\": 0.0,\n    \"effervescence\": 0.0\n  },\n  \"aromatics\": {\n    \"fruit\": [\"fresh\", \"dried\"],\n    \"floral\": [\"fresh\"],\n    \"herbal\": [\"fresh\"],\n    \"spice\": [\"cooked\"],\n    \"earth\": [\"cooked\"],\n    \"wood\": [\"aged\"],\n    \"dairy\": [\"aged\"],\n    \"savory\": [],\n    \"confection\": [\"candied\"],\n    \"vegetal\": [],\n    \"marine\": []\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.umami": {
                "predicted": 6.5,
                "expected": 8.0,
                "match": true
              },
              "basic_tastes.salty": {
                "predicted": 4.0,
                "expected": 5.0,
                "match": true
              },
              "structural.body": {
                "predicted": 7.0,
                "expected": 7.0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 454,
            "output_tokens": 226,
            "latency_ms": 2330.9,
            "usd": 0.000543,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 4,\n    \"sour\": 2,\n    \"salty\": 5,\n    \"bitter\": 1,\n    \"umami\": 7\n  },\n  \"structural\": {\n    \"body\": 7,\n    \"acidity_perception\": 3,\n    \"tannin\": 0,\n    \"astringency\": 1,\n    \"alcohol_heat\": 0,\n    \"effervescence\": 0\n  },\n  \"aromatics\": {\n    \"fruit\": [\"dried\"],\n    \"floral\": [\"fresh\"],\n    \"herbal\": [\"fresh\"],\n    \"spice\": [\"dried\"],\n    \"earth\": [\"dried\"],\n    \"wood\": [\"dried\"],\n    \"dairy\": [\"cooked\"],\n    \"savory\": [\"umami\"],\n    \"confection\": [],\n    \"vegetal\": [],\n    \"marine\": []\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.umami": {
                "predicted": 7,
                "expected": 8.0,
                "match": true
              },
              "basic_tastes.salty": {
                "predicted": 5,
                "expected": 5.0,
                "match": true
              },
              "structural.body": {
                "predicted": 7,
                "expected": 7.0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 454,
            "output_tokens": 203,
            "latency_ms": 2366.8,
            "usd": 0.000506,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 454.0,
        "output_tokens_mean": 218.0,
        "latency_ms_mean": 2329.967,
        "usd_sum": 0.001591
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-019",
      "category": "structured_output",
      "capability": "sensory_profile_json",
      "difficulty": "hard",
      "cuisine_region": "japanese",
      "scorer": "json_match",
      "context_needs": [
        "scales.basic_tastes",
        "scales.structural",
        "vocab.aromatic_families",
        "vocab.qualifiers"
      ],
      "prompt_template_hash": "aa2726470aa7cfb8",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Basic tastes scale (0-10)\n\nEach of `sweet`, `sour`, `salty`, `bitter`, `umami` is rated on a\n0-10 intensity scale where 0 = not detectable, 5 = clearly present,\n10 = overwhelming. Use decimals when precision helps (e.g. 8.5).\n\n### Structural attributes scale (0-10)\n\n`body`, `acidity_perception`, `tannin`, `astringency`, `alcohol_heat`,\n`effervescence` are each rated on the same 0-10 scale as basic tastes.\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n### Aromatic qualifiers\n\nfresh, dried, cooked, candied, fermented, zest, overripe\n\n---\n\nProduce a JSON sensory profile for blistered shishito peppers lightly salted. Keys: basic_tastes, structural, aromatics.\n\nINPUT:\n{\n  \"item\": \"blistered shishito peppers with flaky salt\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "value": {
          "basic_tastes": {
            "sweet": 2.0,
            "sour": 1.0,
            "salty": 5.0,
            "bitter": 3.0,
            "umami": 2.0
          },
          "structural": {
            "body": 3.0,
            "acidity_perception": 2.0,
            "tannin": 0,
            "astringency": 1.0,
            "alcohol_heat": 0,
            "effervescence": 0
          }
        },
        "required_keys": [
          "basic_tastes.salty",
          "basic_tastes.bitter",
          "structural.body"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 2,\n    \"sour\": 1,\n    \"salty\": 3,\n    \"bitter\": 3,\n    \"umami\": 2\n  },\n  \"structural\": {\n    \"body\": 4,\n    \"acidity_perception\": 2,\n    \"tannin\": 1,\n    \"astringency\": 2,\n    \"alcohol_heat\": 0,\n    \"effervescence\": 0\n  },\n  \"aromatics\": {\n    \"fruit\": [\"fresh\"],\n    \"floral\": [],\n    \"herbal\": [\"fresh\"],\n    \"spice\": [\"fresh\"],\n    \"earth\": [\"cooked\"],\n    \"wood\": [],\n    \"dairy\": [],\n    \"savory\": [],\n    \"confection\": [],\n    \"vegetal\": [\"fresh\", \"cooked\"],\n    \"marine\": []\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.salty": {
                "predicted": 3,
                "expected": 5.0,
                "match": true
              },
              "basic_tastes.bitter": {
                "predicted": 3,
                "expected": 3.0,
                "match": true
              },
              "structural.body": {
                "predicted": 4,
                "expected": 3.0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 448,
            "output_tokens": 196,
            "latency_ms": 2225.2,
            "usd": 0.000493,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 3,\n    \"sour\": 1.5,\n    \"salty\": 3.5,\n    \"bitter\": 2,\n    \"umami\": 2\n  },\n  \"structural\": {\n    \"body\": 3,\n    \"acidity_perception\": 2,\n    \"tannin\": 0,\n    \"astringency\": 1,\n    \"alcohol_heat\": 0.5,\n    \"effervescence\": 0\n  },\n  \"aromatics\": {\n    \"families\": [\"vegetal\", \"herbal\", \"spice\"],\n    \"qualifiers\": [\"fresh\", \"cooked\"]\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.salty": {
                "predicted": 3.5,
                "expected": 5.0,
                "match": true
              },
              "basic_tastes.bitter": {
                "predicted": 2,
                "expected": 3.0,
                "match": true
              },
              "structural.body": {
                "predicted": 3,
                "expected": 3.0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 448,
            "output_tokens": 154,
            "latency_ms": 1875.3,
            "usd": 0.000426,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"basic_tastes\": {\n    \"sweet\": 2.5,\n    \"sour\": 1.0,\n    \"salty\": 3.5,\n    \"bitter\": 2.0,\n    \"umami\": 1.5\n  },\n  \"structural\": {\n    \"body\": 3.0,\n    \"acidity_perception\": 1.0,\n    \"tannin\": 1.0,\n    \"astringency\": 2.0,\n    \"alcohol_heat\": 0.0,\n    \"effervescence\": 0.0\n  },\n  \"aromatics\": [\n    {\n      \"family\": \"vegetal\",\n      \"qualifier\": \"fresh\"\n    },\n    {\n      \"family\": \"spice\",\n      \"qualifier\": \"fresh\"\n    },\n    {\n      \"family\": \"earth\",\n      \"qualifier\": \"cooked\"\n    }\n  ]\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "per_key": {
              "basic_tastes.salty": {
                "predicted": 3.5,
                "expected": 5.0,
                "match": true
              },
              "basic_tastes.bitter": {
                "predicted": 2.0,
                "expected": 3.0,
                "match": true
              },
              "structural.body": {
                "predicted": 3.0,
                "expected": 3.0,
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 448,
            "output_tokens": 201,
            "latency_ms": 2027.4,
            "usd": 0.000501,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 448.0,
        "output_tokens_mean": 183.667,
        "latency_ms_mean": 2042.633,
        "usd_sum": 0.00142
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:struct-020",
      "category": "structured_output",
      "capability": "schema_valid_pairing",
      "difficulty": "expert",
      "cuisine_region": "mexican",
      "scorer": "schema_validation",
      "context_needs": [
        "schema.pairing",
        "vocab.match_types",
        "vocab.explanation_codes",
        "vocab.grade_scale",
        "id.format",
        "ogs_version"
      ],
      "prompt_template_hash": "5b115600e8dad71f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS pairing schema (`schema/pairing.schema.json`)\n\n```json\n{\n  \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n  \"$id\": \"https://opengastronomy.org/schema/v0.2/pairing.schema.json\",\n  \"title\": \"OGS Pairing\",\n  \"description\": \"A scored, explained pairing between a dish and a beverage.\",\n  \"type\": \"object\",\n  \"required\": [\n    \"id\",\n    \"ogs_version\",\n    \"name\",\n    \"dish_ref\",\n    \"beverage_ref\",\n    \"match_types\",\n    \"overall_score\",\n    \"grade\",\n    \"explanation_codes\",\n    \"confidence\"\n  ],\n  \"properties\": {\n    \"id\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:pairing:[a-z0-9-]+$\"\n    },\n    \"ogs_version\": {\n      \"type\": \"string\",\n      \"pattern\": \"^0\\\\.(1|2)\\\\.\\\\d+$\"\n    },\n    \"primary_language\": {\n      \"$ref\": \"#/$defs/bcp47_tag\"\n    },\n    \"available_languages\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/bcp47_tag\"\n      }\n    },\n    \"name\": {\n     
 \"$ref\": \"#/$defs/lstring\"\n    },\n    \"description\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"dish_ref\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:dish:[a-z0-9-]+$\"\n    },\n    \"beverage_ref\": {\n      \"type\": \"string\",\n      \"pattern\": \"^ogs:[a-z0-9-]+:beverage:[a-z0-9-]+$\"\n    },\n    \"match_types\": {\n      \"type\": \"array\",\n      \"minItems\": 1,\n      \"items\": {\n        \"$ref\": \"#/$defs/match_type_entry\"\n      }\n    },\n    \"overall_score\": {\n      \"type\": \"integer\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    },\n    \"grade\": {\n      \"type\": \"string\",\n      \"enum\": [\n        \"A+\",\n        \"A\",\n        \"B+\",\n        \"B\",\n        \"C+\",\n        \"C\",\n        \"D\",\n        \"F\"\n      ]\n    },\n    \"explanation_codes\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/explanation_code_entry\"\n      }\n    },\n    \"risks\": {\n      \"type\": \"array\",\n      \"items\": {\n        \"$ref\": \"#/$defs/risk_entry\"\n      }\n    },\n    \"confidence\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 1\n    },\n    \"evidence\": {\n      \"$ref\": \"#/$defs/evidence\"\n    },\n    \"notes\": {\n      \"$ref\": \"#/$defs/lstring\"\n    },\n    \"metadata\": {\n      \"$ref\": \"#/$defs/metadata\"\n    }\n  },\n  \"additionalProperties\": true,\n  \"$defs\": {\n    \"bcp47_tag\": {\n      \"type\": \"string\",\n      \"pattern\": \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\"\n    },\n    \"lstring\": {\n      \"oneOf\": [\n        {\n          \"type\": \"string\",\n          \"minLength\": 1\n        },\n        {\n          \"type\": \"object\",\n          \"minProperties\": 1,\n          \"patternProperties\": {\n            \"^[A-Za-z]{2,3}(-[A-Za-z0-9]{2,8})*$\": {\n              \"type\": \"string\",\n              \"minLength\": 1\n            }\n          },\n          
\"additionalProperties\": false\n        },\n        {\n          \"type\": \"object\",\n          \"required\": [\n            \"labels\"\n          ],\n          \"properties\": {\n            \"primary_language\": {\n              \"$ref\": \"#/$defs/bcp47_tag\"\n            },\n            \"labels\": {\n              \"type\": \"array\",\n              \"minItems\": 1,\n              \"items\": {\n                \"type\": \"object\",\n                \"required\": [\n                  \"value\",\n                  \"language\"\n                ],\n                \"properties\": {\n                  \"value\": {\n                    \"type\": \"string\",\n                    \"minLength\": 1\n                  },\n                  \"language\": {\n                    \"$ref\": \"#/$defs/bcp47_tag\"\n                  },\n                  \"script\": {\n                    \"type\": \"string\",\n                    \"pattern\": \"^[A-Z][a-z]{3}$\"\n                  },\n                  \"role\": {\n                    \"type\": \"string\",\n                    \"enum\": [\n                      \"preferred\",\n                      \"alternate\",\n                      \"transliteration\",\n                      \"descriptive\",\n                      \"loan\",\n                      \"historical\",\n                      \"deprecated\",\n                      \"ipa\"\n                    ]\n                  },\n                  \"transliteration_scheme\": {\n                    \"type\": \"string\"\n                  },\n                  \"translation\": {\n                    \"type\": \"object\",\n                    \"additionalProperties\": true\n                  }\n                },\n                \"additionalProperties\": false\n              }\n            }\n          },\n          \"additionalProperties\": false\n        }\n      ]\n    },\n    \"match_type_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"type\",\n 
       \"score\"\n      ],\n      \"properties\": {\n        \"type\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"harmony\",\n            \"contrast\",\n            \"bridge\",\n            \"intensity_alignment\",\n            \"structural_balance\",\n            \"refresh\"\n          ]\n        },\n        \"score\": {\n          \"type\": \"integer\",\n          \"minimum\": 0,\n          \"maximum\": 100\n        },\n        \"description\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"primary\": {\n          \"type\": \"boolean\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"explanation_code_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"code\"\n      ],\n      \"properties\": {\n        \"code\": {\n          \"type\": \"string\"\n        },\n        \"weight\": {\n          \"type\": \"number\",\n          \"minimum\": 0,\n          \"maximum\": 1\n        },\n        \"detail\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"risk_entry\": {\n      \"type\": \"object\",\n      \"required\": [\n        \"code\",\n        \"severity\"\n      ],\n      \"properties\": {\n        \"code\": {\n          \"type\": \"string\"\n        },\n        \"severity\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"low\",\n            \"medium\",\n            \"high\"\n          ]\n        },\n        \"description\": {\n          \"$ref\": \"#/$defs/lstring\"\n        },\n        \"mitigation\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": false\n    },\n    \"evidence\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"method\": {\n          \"type\": \"string\",\n          \"enum\": [\n            \"expert_panel\",\n            \"instrumental\",\n            \"computed\",\n            \"estimated\"\n  
        ]\n        },\n        \"sources\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          }\n        },\n        \"notes\": {\n          \"$ref\": \"#/$defs/lstring\"\n        }\n      },\n      \"additionalProperties\": true\n    },\n    \"metadata\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"created\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"updated\": {\n          \"type\": \"string\",\n          \"format\": \"date-time\"\n        },\n        \"source\": {\n          \"type\": \"string\"\n        },\n        \"license\": {\n          \"type\": \"string\"\n        }\n      },\n      \"additionalProperties\": true\n    }\n  }\n}\n```\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n### OGS Explanation Codes\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a 
crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**negative**\n- `RISK_TANNIN_FISH` — Tannin-Fish Clash: Tannins interact with fish oils to produce metallic, unpleasant off-flavors.\n- `RISK_ALCOHOL_OVERPOWERS` — Alcohol Overpowers Dish: High alcohol content overwhelms the dish's delicate flavors with heat and bitterness.\n- `RISK_BITTER_AMPLIFY` — Bitterness Amplification: Bitterness in both dish and beverage compounds to an unpleasant level.\n- `RISK_OAK_MASKS` — Oak Masks Dish: Heavy oak influence in the beverage masks or overwhelms the dish's subtle flavors.\n- `RISK_INTENSITY_MISMATCH` — Intensity Mismatch: One element is significantly more intense than the other, causing the weaker to be lost.\n- `RISK_ACID_CLASH` — Acid Clash: Excessive acidity from both dish and beverage creates an unpleasantly sharp experience.\n- `RISK_SWEETNESS_CONFLICT` — Sweetness Conflict: The dish makes the beverage taste less sweet, thin, or overly acidic.\n- `RISK_SPICE_ALCOHOL` — Spice-Alcohol Amplification: High alcohol amplifies the heat of spicy dishes to an uncomfortable level.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall flavor 
intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n### OGS ID format\n\nAll OGS entity IDs follow the pattern:\n\n    ogs:<namespace>:<entity-type>:<slug>\n\n- `namespace` is lowercase alphanumeric with hyphens, e.g. 
`core`.\n- `entity-type` is one of `ingredient`, `dish`, `beverage`, `pairing`,\n  `benchmark-task`, etc.\n- `slug` is lowercase alphanumeric with hyphens.\n\nExample: `ogs:core:ingredient:fleur-de-sel`.\n\n### OGS version\n\nAll produced documents must set `ogs_version` to **\"0.1.0\"**.\n\n---\n\nProduce an OGS pairing JSON for mole-negro enchiladas with Aragonese Garnacha. JSON only.\n\nINPUT:\n{\n  \"pairing_slug\": \"mole-negro-garnacha\",\n  \"dish_ref\": \"ogs:core:dish:mole-negro-enchiladas\",\n  \"beverage_ref\": \"ogs:core:beverage:garnacha-aragon\",\n  \"intent\": \"Cross-cultural bridge pairing: ripe fruit and body handle chocolate-chile sauce.\"\n}\n\nANSWER FORMAT: json",
      "expected": {
        "schema_ref": "schema/pairing.schema.json",
        "required_fields": {
          "id": "ogs:core:pairing:mole-negro-garnacha",
          "ogs_version": "0.1.0",
          "dish_ref": "ogs:core:dish:mole-negro-enchiladas",
          "beverage_ref": "ogs:core:beverage:garnacha-aragon"
        }
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:mole-negro-garnacha\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Mole Negro Enchiladas with Aragonese Garnacha\"\n  },\n  \"dish_ref\": \"ogs:core:dish:mole-negro-enchiladas\",\n  \"beverage_ref\": \"ogs:core:beverage:garnacha-aragon\",\n  \"match_types\": [\n    {\n      \"type\": \"bridge\",\n      \"score\": 90,\n      \"description\": {\n        \"en\": \"The wine's ripe red fruit and medium body bridge well with the complex chocolate and mild chili spice of the mole negro sauce.\"\n      },\n      \"primary\": true\n    },\n    {\n      \"type\": \"harmony\",\n      \"score\": 80,\n      \"description\": {\n        \"en\": \"Shared savory and spicy elements between the dish and the wine create a harmonious flavor profile.\"\n      }\n    },\n    {\n      \"type\": \"intensity_alignment\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"The medium intensity and body of the Garnacha align well with the rich and layered flavors of the mole sauce.\"\n      }\n    },\n    {\n      \"type\": \"refresh\",\n      \"score\": 70,\n      \"description\": {\n        \"en\": \"The wine's moderate acidity helps refresh the palate from the richness of the sauce.\"\n      }\n    }\n  ],\n  \"overall_score\": 85,\n  \"grade\": \"B+\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"AROMA_SHARED_SPICE\",\n      \"weight\": 0.4,\n      \"detail\": {\n        \"en\": \"Both dish and beverage feature spice aromatics which create an aromatic bridge.\"\n      }\n    },\n    {\n      \"code\": \"AROMA_SHARED_FRUIT\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"The ripe fruit notes in the Garnacha echo the subtle fruitiness in the mole sauce.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_BODY_ALIGN\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"The medium body of the Garnacha matches the rich body of the mole sauce.\"\n      }\n    },\n   
 {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.2,\n      \"detail\": {\n        \"en\": \"The wine’s acidity cuts through the richness, refreshing the palate.\"\n      }\n    }\n  ],\n  \"confidence\": 0.9,\n  \"notes\": {\n    \"en\": \"This pairing highlights the interplay between the complex, chocolate-chili mole and the bright, ripe fruit character of Aragonese Garnacha, creating a satisfying cross-cultural bridge.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:mole-negro-garnacha",
                "expected": "ogs:core:pairing:mole-negro-garnacha",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:mole-negro-enchiladas",
                "expected": "ogs:core:dish:mole-negro-enchiladas",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:garnacha-aragon",
                "expected": "ogs:core:beverage:garnacha-aragon",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3372,
            "output_tokens": 595,
            "latency_ms": 6092.8,
            "usd": 0.002301,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:mole-negro-garnacha\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Mole Negro Enchiladas with Aragonese Garnacha\"\n  },\n  \"dish_ref\": \"ogs:core:dish:mole-negro-enchiladas\",\n  \"beverage_ref\": \"ogs:core:beverage:garnacha-aragon\",\n  \"match_types\": [\n    {\n      \"type\": \"bridge\",\n      \"score\": 90,\n      \"description\": {\n        \"en\": \"The ripe fruit and robust body of Garnacha complements the complex chocolate and chile flavors of the mole sauce, bridging Mexican and Spanish flavors.\"\n      },\n      \"primary\": true\n    },\n    {\n      \"type\": \"harmony\",\n      \"score\": 80,\n      \"description\": {\n        \"en\": \"The intensity and complexity of both dish and beverage align, creating a harmonious balanced pairing.\"\n      }\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"The body of the Garnacha matches the rich, layered textures of the mole negro, ensuring structural balance.\"\n      }\n    }\n  ],\n  \"overall_score\": 85,\n  \"grade\": \"B+\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"AROMA_SHARED_FRUIT\",\n      \"weight\": 0.4,\n      \"detail\": {\n        \"en\": \"Shared ripe fruit aromatics in both mole sauce and Garnacha create an aromatic bridge.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_BODY_ALIGN\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"The wine's full body aligns well with the rich and hearty mole negro sauce.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_INTENSITY_MATCH\",\n      \"weight\": 0.2,\n      \"detail\": {\n        \"en\": \"Intensity levels are well matched, preventing either element from overpowering the other.\"\n      }\n    },\n    {\n      \"code\": \"AROMA_SHARED_SPICE\",\n      \"weight\": 0.1,\n      \"detail\": {\n        \"en\": \"Spice notes in the Garnacha echo the chile spiciness 
in the mole.\"\n      }\n    }\n  ],\n  \"confidence\": 0.9,\n  \"notes\": {\n    \"en\": \"This pairing uses the ripe fruit and structured body of Aragonese Garnacha to bridge the complex, layered flavors of traditional mole negro enchiladas.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:mole-negro-garnacha",
                "expected": "ogs:core:pairing:mole-negro-garnacha",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:mole-negro-enchiladas",
                "expected": "ogs:core:dish:mole-negro-enchiladas",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:garnacha-aragon",
                "expected": "ogs:core:beverage:garnacha-aragon",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3372,
            "output_tokens": 551,
            "latency_ms": 4793.6,
            "usd": 0.00223,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "{\n  \"id\": \"ogs:core:pairing:mole-negro-garnacha\",\n  \"ogs_version\": \"0.1.0\",\n  \"name\": {\n    \"en\": \"Mole Negro Enchiladas & Aragonese Garnacha\"\n  },\n  \"dish_ref\": \"ogs:core:dish:mole-negro-enchiladas\",\n  \"beverage_ref\": \"ogs:core:beverage:garnacha-aragon\",\n  \"match_types\": [\n    {\n      \"type\": \"bridge\",\n      \"score\": 88,\n      \"description\": {\n        \"en\": \"The ripe fruit character and body of Garnacha complement the rich chocolate and chile complexity of the mole sauce, forming a flavorful bridge.\"\n      },\n      \"primary\": true\n    },\n    {\n      \"type\": \"harmony\",\n      \"score\": 80,\n      \"description\": {\n        \"en\": \"Both dish and beverage share an intense, robust flavor profile that converges harmoniously.\"\n      }\n    },\n    {\n      \"type\": \"structural_balance\",\n      \"score\": 85,\n      \"description\": {\n        \"en\": \"The Garnacha’s body aligns well with the richness of the mole sauce, balancing mouthfeel.\"\n      }\n    },\n    {\n      \"type\": \"refresh\",\n      \"score\": 70,\n      \"description\": {\n        \"en\": \"The wine’s moderate acidity refreshes the palate after the richness of the mole.\"\n      }\n    }\n  ],\n  \"overall_score\": 83,\n  \"grade\": \"B\",\n  \"explanation_codes\": [\n    {\n      \"code\": \"AROMA_SHARED_FRUIT\",\n      \"weight\": 0.3,\n      \"detail\": {\n        \"en\": \"Shared ripe fruit aromatics create a tasty aromatic bridge.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_BODY_ALIGN\",\n      \"weight\": 0.25,\n      \"detail\": {\n        \"en\": \"The Garnacha’s body matches the richness of the dish.\"\n      }\n    },\n    {\n      \"code\": \"STRUCT_INTENSITY_MATCH\",\n      \"weight\": 0.2,\n      \"detail\": {\n        \"en\": \"Flavor intensity is well aligned between dish and beverage.\"\n      }\n    },\n    {\n      \"code\": \"REFRESH_ACID\",\n      \"weight\": 0.15,\n      
\"detail\": {\n        \"en\": \"The wine’s acidity cuts through richness, refreshing the palate.\"\n      }\n    },\n    {\n      \"code\": \"HARMONY_FLAVOR_CONVERGE\",\n      \"weight\": 0.1,\n      \"detail\": {\n        \"en\": \"Flavor profiles of the mole and Garnacha converge harmoniously.\"\n      }\n    }\n  ],\n  \"confidence\": 0.9,\n  \"notes\": {\n    \"en\": \"This pairing balances the complexity and richness of mole negro enchiladas with the ripe fruit character and balanced body of an Aragonese Garnacha, creating a complex but approachable match.\"\n  }\n}",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "schema_valid": true,
            "schema_errors": [],
            "consistent": true,
            "required_fields": {
              "id": {
                "predicted": "ogs:core:pairing:mole-negro-garnacha",
                "expected": "ogs:core:pairing:mole-negro-garnacha",
                "match": true
              },
              "ogs_version": {
                "predicted": "0.1.0",
                "expected": "0.1.0",
                "match": true
              },
              "dish_ref": {
                "predicted": "ogs:core:dish:mole-negro-enchiladas",
                "expected": "ogs:core:dish:mole-negro-enchiladas",
                "match": true
              },
              "beverage_ref": {
                "predicted": "ogs:core:beverage:garnacha-aragon",
                "expected": "ogs:core:beverage:garnacha-aragon",
                "match": true
              }
            }
          },
          "metrics": {
            "input_tokens": 3372,
            "output_tokens": 629,
            "latency_ms": 5622.6,
            "usd": 0.002355,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 3372.0,
        "output_tokens_mean": 591.667,
        "latency_ms_mean": 5503.0,
        "usd_sum": 0.006886
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-001",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "e52270d817a9dc7f",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nUsing the OGS sensory model, return the single aromatic family (from the controlled vocabulary) that best classifies the aromatic item below. Respond with just the family name.\n\nINPUT:\n{\n  \"item\": \"lemon zest\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "fruit"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "fruit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fruit",
            "expected": "fruit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 333,
            "output_tokens": 1,
            "latency_ms": 611.5,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "fruit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fruit",
            "expected": "fruit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 333,
            "output_tokens": 1,
            "latency_ms": 658.7,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "fruit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fruit",
            "expected": "fruit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 333,
            "output_tokens": 1,
            "latency_ms": 544.6,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 333.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 604.933,
        "usd_sum": 0.000405
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-002",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "e2096242953ff882",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nUsing the OGS sensory model, return the single aromatic family that best classifies the item below. Respond with just the family name.\n\nINPUT:\n{\n  \"item\": \"wet stone\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "earth"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "earth",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "earth",
            "expected": "earth",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 325,
            "output_tokens": 1,
            "latency_ms": 574.8,
            "usd": 0.000132,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "earth",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "earth",
            "expected": "earth",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 325,
            "output_tokens": 1,
            "latency_ms": 477.4,
            "usd": 0.000132,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "earth",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "earth",
            "expected": "earth",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 325,
            "output_tokens": 1,
            "latency_ms": 554.5,
            "usd": 0.000132,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 325.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 535.567,
        "usd_sum": 0.000396
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-003",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "fa5fa32bfe275dfb",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nUsing the OGS sensory model, return the single aromatic family that best classifies the item below. Respond with just the family name.\n\nINPUT:\n{\n  \"item\": \"cooked beef fat\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "savory"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "savory",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "savory",
            "expected": "savory",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 327,
            "output_tokens": 2,
            "latency_ms": 480.2,
            "usd": 0.000134,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "savory",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "savory",
            "expected": "savory",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 327,
            "output_tokens": 2,
            "latency_ms": 732.2,
            "usd": 0.000134,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "savory",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "savory",
            "expected": "savory",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 327,
            "output_tokens": 2,
            "latency_ms": 535.2,
            "usd": 0.000134,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 327.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 582.533,
        "usd_sum": 0.000402
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-004",
      "category": "vocabulary",
      "capability": "cooking_method_vocabulary",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "e0631034b8b8ece4",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nPick the single OGS cooking method that best describes a protein slowly cooked while fully submerged in fat at a low temperature. Respond with just the method name from the OGS vocabulary.\n\nINPUT:\n{\n  \"description\": \"duck legs slow-cooked submerged in their own rendered fat at 80 C\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "confit"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "confit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "confit",
            "expected": "confit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 407,
            "output_tokens": 2,
            "latency_ms": 466.8,
            "usd": 0.000166,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "confit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "confit",
            "expected": "confit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 407,
            "output_tokens": 2,
            "latency_ms": 652.9,
            "usd": 0.000166,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "confit",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "confit",
            "expected": "confit",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 407,
            "output_tokens": 2,
            "latency_ms": 970.6,
            "usd": 0.000166,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 407.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 696.767,
        "usd_sum": 0.000498
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-005",
      "category": "vocabulary",
      "capability": "match_type_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "bbbd81bb3c8b8ea4",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nSelect the single OGS match type that best describes a pairing whose strength is driven by a specific shared aromatic compound or family between dish and beverage.\n\nINPUT:\n{\n  \"description\": \"dish and wine are linked by a shared earthy, forest-floor aromatic\"\n}\n\nANSWER FORMAT: one_of:harmony,contrast,bridge,intensity_alignment,structural_balance,refresh",
      "expected": {
        "value": "bridge"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "bridge",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "bridge",
            "expected": "bridge",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 317,
            "output_tokens": 1,
            "latency_ms": 583.1,
            "usd": 0.000128,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "bridge",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "bridge",
            "expected": "bridge",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 317,
            "output_tokens": 1,
            "latency_ms": 725.4,
            "usd": 0.000128,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "bridge",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "bridge",
            "expected": "bridge",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 317,
            "output_tokens": 1,
            "latency_ms": 571.1,
            "usd": 0.000128,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 317.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 626.533,
        "usd_sum": 0.000384
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-006",
      "category": "vocabulary",
      "capability": "component_role_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "6c6ae6f82f354be3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nReturn the single OGS component role that best fits the described element. Use only roles from the OGS controlled vocabulary.\n\nINPUT:\n{\n  \"element\": \"a soy-dashi based liquid ladled over the rice bowl, providing salt, umami and moisture\"\n}\n\nANSWER FORMAT: one_of:main_protein,main_vegetable,starch,sauce,fat_component,acid_component,aromatic_garnish,seasoning,textural_element",
      "expected": {
        "value": "sauce"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "sauce",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sauce",
            "expected": "sauce",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 351,
            "output_tokens": 2,
            "latency_ms": 581.2,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "sauce",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sauce",
            "expected": "sauce",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 351,
            "output_tokens": 2,
            "latency_ms": 10079.8,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "sauce",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "sauce",
            "expected": "sauce",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 351,
            "output_tokens": 2,
            "latency_ms": 629.3,
            "usd": 0.000144,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 351.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 3763.433,
        "usd_sum": 0.000432
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-007",
      "category": "vocabulary",
      "capability": "qualifier_vocabulary",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.qualifiers"
      ],
      "prompt_template_hash": "0adac8c06022acf5",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic qualifiers\n\nfresh, dried, cooked, candied, fermented, zest, overripe\n\n---\n\nSelect the OGS aromatic qualifier that best describes how the item is expressed.\n\nINPUT:\n{\n  \"item\": \"candied ginger\"\n}\n\nANSWER FORMAT: one_of:fresh,dried,cooked,candied,fermented,zest,overripe",
      "expected": {
        "value": "candied"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "candied",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "candied",
            "expected": "candied",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 299,
            "output_tokens": 2,
            "latency_ms": 504.8,
            "usd": 0.000123,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "candied",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "candied",
            "expected": "candied",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 299,
            "output_tokens": 2,
            "latency_ms": 1518.1,
            "usd": 0.000123,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "candied",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "candied",
            "expected": "candied",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 299,
            "output_tokens": 2,
            "latency_ms": 553.8,
            "usd": 0.000123,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 299.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 858.9,
        "usd_sum": 0.000369
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-008",
      "category": "vocabulary",
      "capability": "grade_scale",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.grade_scale"
      ],
      "prompt_template_hash": "e58319dc1276cdb2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n---\n\nUsing the OGS pairing grade scale (ogs-match.md §5), return the letter grade for the given overall score. Respond with just the grade token, e.g. 'A+'.\n\nINPUT:\n{\n  \"overall_score\": 92\n}\n\nANSWER FORMAT: one_of:A+,A,B+,B,C+,C,D,F",
      "expected": {
        "value": "A"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "A",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "A",
            "expected": "A",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 386,
            "output_tokens": 1,
            "latency_ms": 566.2,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "A",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "A",
            "expected": "A",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 386,
            "output_tokens": 1,
            "latency_ms": 621.0,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "A",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "A",
            "expected": "A",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 386,
            "output_tokens": 1,
            "latency_ms": 715.0,
            "usd": 0.000156,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 386.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 634.067,
        "usd_sum": 0.000468
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-009",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "easy",
      "cuisine_region": "spanish",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "d51864ab27ecfa64",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nUsing the OGS sensory model, return the single aromatic family that best characterises smoked paprika. Respond with just the family name.\n\nINPUT:\n{\n  \"item\": \"smoked paprika (pimentón de la Vera)\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "spice"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "spice",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "spice",
            "expected": "spice",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 332,
            "output_tokens": 2,
            "latency_ms": 778.9,
            "usd": 0.000136,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "smoke is not an aromatic family in the given OGS controlled vocabularies. smoked paprika is primarily characterized by a combination of smoke and spice. Since smoke is not an option, I would select spice as the most representative aromatic family for smoked paprika.\n\nspice",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "smoke is not an aromatic family in the given ogs controlled vocabularies. smoked paprika is primarily characterized by a combination of smoke and spice. since smoke is not an option, i would select spice as the most representative aromatic family for smoked paprika. spice",
            "expected": "spice",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 332,
            "output_tokens": 53,
            "latency_ms": 1048.3,
            "usd": 0.000218,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "smoke is closely related to wood in aromatic family for smoked paprika.\n\nwood",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "smoke is closely related to wood in aromatic family for smoked paprika. wood",
            "expected": "spice",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 332,
            "output_tokens": 15,
            "latency_ms": 876.6,
            "usd": 0.000157,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 332.0,
        "output_tokens_mean": 23.333,
        "latency_ms_mean": 901.267,
        "usd_sum": 0.000511
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-010",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "medium",
      "cuisine_region": "japanese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "1748069ad0ebdde7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nUsing the OGS sensory model, return the single dominant aromatic family of a classic kombu-katsuo dashi.\n\nINPUT:\n{\n  \"item\": \"classic kombu-katsuobushi dashi\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "marine"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "marine",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "marine",
            "expected": "marine",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 1,
            "latency_ms": 653.1,
            "usd": 0.000133,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "marine",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "marine",
            "expected": "marine",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 1,
            "latency_ms": 476.4,
            "usd": 0.000133,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "savory",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "savory",
            "expected": "marine",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 329,
            "output_tokens": 2,
            "latency_ms": 619.3,
            "usd": 0.000135,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": 0.0133,
        "ci95_hi": 1.32,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 329.0,
        "output_tokens_mean": 1.333,
        "latency_ms_mean": 582.933,
        "usd_sum": 0.000401
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-011",
      "category": "vocabulary",
      "capability": "cooking_method_vocabulary",
      "difficulty": "easy",
      "cuisine_region": "indian",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "85de6add5c1ebfba",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nPick the single OGS cooking method that best matches tandoori chicken cooked at 450°C in a clay oven.\n\nINPUT:\n{\n  \"description\": \"tandoori chicken, marinated and cooked in a clay tandoor at ~450°C\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "roasted"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "roasted",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "roasted",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 399,
            "output_tokens": 2,
            "latency_ms": 1106.3,
            "usd": 0.000163,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "grilled",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "grilled",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 399,
            "output_tokens": 2,
            "latency_ms": 560.9,
            "usd": 0.000163,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "grilled",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "grilled",
            "expected": "roasted",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 399,
            "output_tokens": 2,
            "latency_ms": 547.1,
            "usd": 0.000163,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 399.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 738.1,
        "usd_sum": 0.000489
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-012",
      "category": "vocabulary",
      "capability": "cooking_method_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "peruvian",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "44c9b26dc70c4557",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nPick the single OGS cooking method that best describes Peruvian ceviche's treatment of white fish in lime juice.\n\nINPUT:\n{\n  \"description\": \"diced white fish denatured in freshly squeezed lime juice for 10 minutes with red onion and aji limo\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "cured"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "cured",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cured",
            "expected": "cured",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 402,
            "output_tokens": 2,
            "latency_ms": 3402.3,
            "usd": 0.000164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "cured",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cured",
            "expected": "cured",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 402,
            "output_tokens": 2,
            "latency_ms": 476.5,
            "usd": 0.000164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "cured",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "cured",
            "expected": "cured",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 402,
            "output_tokens": 2,
            "latency_ms": 593.8,
            "usd": 0.000164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 402.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 1490.867,
        "usd_sum": 0.000492
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-013",
      "category": "vocabulary",
      "capability": "component_role_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "moroccan",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "dc3e62daf8bdf21a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nReturn the single OGS component role that best fits harissa stirred into a lamb tagine at service.\n\nINPUT:\n{\n  \"description\": \"a spoonful of fiery harissa stirred into a lamb-and-apricot tagine at service\"\n}\n\nANSWER FORMAT: one_of:main_protein,main_vegetable,starch,sauce,fat_component,acid_component,aromatic_garnish,seasoning,textural_element",
      "expected": {
        "value": "seasoning"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "sauce",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "sauce",
            "expected": "seasoning",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 2,
            "latency_ms": 498.5,
            "usd": 0.000142,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "sauce",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "sauce",
            "expected": "seasoning",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 2,
            "latency_ms": 729.6,
            "usd": 0.000142,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "sauce",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "sauce",
            "expected": "seasoning",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 2,
            "latency_ms": 585.8,
            "usd": 0.000142,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0,
      "score_summary": {
        "mean": 0.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.0,
        "ci95_hi": 0.0,
        "n": 3
      },
      "score_if_valid": 0.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 347.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 604.633,
        "usd_sum": 0.000426
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-014",
      "category": "vocabulary",
      "capability": "qualifier_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "chinese",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.qualifiers"
      ],
      "prompt_template_hash": "3980456a47e9b7c6",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic qualifiers\n\nfresh, dried, cooked, candied, fermented, zest, overripe\n\n---\n\nSelect the OGS aromatic qualifier that best describes the state of Sichuan peppercorn as it is commonly used — whole, toasted, and ground over finished mapo tofu.\n\nINPUT:\n{\n  \"item\": \"Sichuan peppercorn (Zanthoxylum), whole, pan-toasted just before grinding over the dish\"\n}\n\nANSWER FORMAT: one_of:fresh,dried,cooked,candied,fermented,zest,overripe",
      "expected": {
        "value": "dried"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "toasted is not in list, but pan-toasted is synonymous with cooked in culinary sense. So cooked fits best.\n\ncooked",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "toasted is not in list, but pan-toasted is synonymous with cooked in culinary sense. so cooked fits best. cooked",
            "expected": "dried",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 337,
            "output_tokens": 26,
            "latency_ms": 986.0,
            "usd": 0.000176,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "toasted",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": "toasted",
            "expected": "dried",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 337,
            "output_tokens": 2,
            "latency_ms": 548.9,
            "usd": 0.000138,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "dried",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "dried",
            "expected": "dried",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 337,
            "output_tokens": 2,
            "latency_ms": 618.9,
            "usd": 0.000138,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.3333,
      "score_summary": {
        "mean": 0.3333,
        "stdev": 0.5774,
        "sem": 0.3333,
        "ci95_lo": -0.32,
        "ci95_hi": 0.9867,
        "n": 3
      },
      "score_if_valid": 0.3333,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 337.0,
        "output_tokens_mean": 10.0,
        "latency_ms_mean": 717.933,
        "usd_sum": 0.000452
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-015",
      "category": "vocabulary",
      "capability": "grade_scale",
      "difficulty": "easy",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.grade_scale"
      ],
      "prompt_template_hash": "ad313a98974d64f7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n---\n\nUsing the OGS pairing grade scale, return the letter grade for a pairing whose `overall_score` is 78.\n\nINPUT:\n{\n  \"overall_score\": 78\n}\n\nANSWER FORMAT: one_of:A+,A,B+,B,C+,C,D,F",
      "expected": {
        "value": "C+"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "C+",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "c+",
            "expected": "c+",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 372,
            "output_tokens": 2,
            "latency_ms": 609.6,
            "usd": 0.000152,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "C+",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "c+",
            "expected": "c+",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 372,
            "output_tokens": 2,
            "latency_ms": 472.7,
            "usd": 0.000152,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "C+",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "c+",
            "expected": "c+",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 372,
            "output_tokens": 2,
            "latency_ms": 582.4,
            "usd": 0.000152,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 372.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 554.9,
        "usd_sum": 0.000456
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-016",
      "category": "vocabulary",
      "capability": "match_type_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "british",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.match_types"
      ],
      "prompt_template_hash": "95e41ab82cba029a",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Match types\n\nharmony, contrast, bridge, intensity_alignment, structural_balance, refresh\n\n---\n\nSelect the single OGS match type that best describes a beverage whose carbonation physically scrubs the palate between bites of a rich fried dish.\n\nINPUT:\n{\n  \"description\": \"Champagne cleanses the palate between bites of fish and chips\"\n}\n\nANSWER FORMAT: one_of:harmony,contrast,bridge,intensity_alignment,structural_balance,refresh",
      "expected": {
        "value": "refresh"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "refresh",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "refresh",
            "expected": "refresh",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 314,
            "output_tokens": 1,
            "latency_ms": 385.4,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "refresh",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "refresh",
            "expected": "refresh",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 314,
            "output_tokens": 1,
            "latency_ms": 789.4,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "refresh",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "refresh",
            "expected": "refresh",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 314,
            "output_tokens": 1,
            "latency_ms": 1406.6,
            "usd": 0.000127,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 314.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 860.467,
        "usd_sum": 0.000381
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-017",
      "category": "vocabulary",
      "capability": "aromatic_family_classification",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.aromatic_families"
      ],
      "prompt_template_hash": "8070fb687fd2f108",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Aromatic families\n\nfruit, floral, herbal, spice, earth, wood, dairy, savory, confection, vegetal, marine\n\n---\n\nUsing the OGS sensory model, return the single dominant aromatic family of fresh cilantro leaves.\n\nINPUT:\n{\n  \"item\": \"fresh cilantro / coriander leaves\"\n}\n\nANSWER FORMAT: one_of:fruit,floral,herbal,spice,earth,wood,dairy,savory,confection,vegetal,marine",
      "expected": {
        "value": "herbal"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "herbal",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herbal",
            "expected": "herbal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 319,
            "output_tokens": 2,
            "latency_ms": 512.5,
            "usd": 0.000131,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "herbal",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herbal",
            "expected": "herbal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 319,
            "output_tokens": 2,
            "latency_ms": 355.5,
            "usd": 0.000131,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "herbal",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "herbal",
            "expected": "herbal",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 319,
            "output_tokens": 2,
            "latency_ms": 607.9,
            "usd": 0.000131,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 319.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 491.967,
        "usd_sum": 0.000393
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-018",
      "category": "vocabulary",
      "capability": "component_role_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "levantine",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.component_roles"
      ],
      "prompt_template_hash": "9e97b59361dbdf0e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Component roles\n\nmain_protein, main_vegetable, starch, sauce, fat_component, acid_component, aromatic_garnish, seasoning, textural_element\n\n---\n\nReturn the single OGS component role that best fits a drizzle of pomegranate molasses dressed onto a fattoush salad.\n\nINPUT:\n{\n  \"description\": \"pomegranate molasses drizzled over a fattoush salad at service\"\n}\n\nANSWER FORMAT: one_of:main_protein,main_vegetable,starch,sauce,fat_component,acid_component,aromatic_garnish,seasoning,textural_element",
      "expected": {
        "value": "acid_component"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "acid_component",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "acid_component",
            "expected": "acid_component",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 2,
            "latency_ms": 507.2,
            "usd": 0.000142,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "acid_component",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "acid_component",
            "expected": "acid_component",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 2,
            "latency_ms": 730.4,
            "usd": 0.000142,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "acid_component",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "acid_component",
            "expected": "acid_component",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 347,
            "output_tokens": 2,
            "latency_ms": 510.9,
            "usd": 0.000142,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 347.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 582.833,
        "usd_sum": 0.000426
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-019",
      "category": "vocabulary",
      "capability": "cooking_method_vocabulary",
      "difficulty": "medium",
      "cuisine_region": "filipino",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.cooking_methods"
      ],
      "prompt_template_hash": "fc9a763682f1225b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Cooking methods\n\nraw, seared, grilled, roasted, braised, poached, steamed, fried, confit, smoked, fermented, cured, pickled, dehydrated, sous_vide, blanched, sauteed, baked, charred, emulsified\n\n---\n\nPick the single OGS cooking method that best describes bagoong alamang (Philippine fermented salt-cured shrimp paste).\n\nINPUT:\n{\n  \"description\": \"bagoong alamang: salt-cured tiny shrimp left to ferment for 3-6 months\"\n}\n\nANSWER FORMAT: one_of:raw,seared,grilled,roasted,braised,poached,steamed,fried,confit,smoked,fermented,cured,pickled,dehydrated,sous_vide,blanched,sauteed,baked,charred,emulsified",
      "expected": {
        "value": "fermented"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "fermented",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fermented",
            "expected": "fermented",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 401,
            "output_tokens": 2,
            "latency_ms": 617.4,
            "usd": 0.000164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "fermented",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fermented",
            "expected": "fermented",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 401,
            "output_tokens": 2,
            "latency_ms": 500.9,
            "usd": 0.000164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "fermented",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "fermented",
            "expected": "fermented",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 401,
            "output_tokens": 2,
            "latency_ms": 658.9,
            "usd": 0.000164,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 401.0,
        "output_tokens_mean": 2.0,
        "latency_ms_mean": 592.4,
        "usd_sum": 0.000492
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:vocab-020",
      "category": "vocabulary",
      "capability": "grade_scale",
      "difficulty": "medium",
      "cuisine_region": "global",
      "scorer": "exact_match",
      "context_needs": [
        "vocab.grade_scale"
      ],
      "prompt_template_hash": "56ad4f9c58bc1eb7",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### Grade scale (derived from 0-100 overall_score)\n\n- **A+**: 95-100\n- **A**: 90-94\n- **B+**: 85-89\n- **B**: 80-84\n- **C+**: 75-79\n- **C**: 70-74\n- **D**: 60-69\n- **F**: <60\n\n---\n\nUsing the OGS pairing grade scale, return the letter grade for a pairing whose `overall_score` is 64.\n\nINPUT:\n{\n  \"overall_score\": 64\n}\n\nANSWER FORMAT: one_of:A+,A,B+,B,C+,C,D,F",
      "expected": {
        "value": "D"
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "D",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "d",
            "expected": "d",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 372,
            "output_tokens": 1,
            "latency_ms": 627.4,
            "usd": 0.00015,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "D",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "d",
            "expected": "d",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 372,
            "output_tokens": 1,
            "latency_ms": 462.9,
            "usd": 0.00015,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "D",
          "compliant": true,
          "score": 1.0,
          "detail": {
            "predicted": "d",
            "expected": "d",
            "aliases": []
          },
          "metrics": {
            "input_tokens": 372,
            "output_tokens": 1,
            "latency_ms": 526.1,
            "usd": 0.00015,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 1.0,
      "score_summary": {
        "mean": 1.0,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 1.0,
        "ci95_hi": 1.0,
        "n": 3
      },
      "score_if_valid": 1.0,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 372.0,
        "output_tokens_mean": 1.0,
        "latency_ms_mean": 538.8,
        "usd_sum": 0.00045
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-001",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "a75ad0ee2a385794",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nFrom the OGS explanation-code vocabulary, return the set of codes that best explain WHY the pairing below is excellent. Respond as a JSON array of code IDs. Pick 3 to 6 most relevant positive codes.\n\nINPUT:\n{\n  \"dish\": \"A5 wagyu with black truffle jus (very rich, high umami, earthy aromatics, full body)\",\n  \"beverage\": \"Barolo 2018 (Nebbiolo, high tannin, high acidity, full body, tar/forest-floor)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_TANNIN_FAT",
          "STRUCT_INTENSITY_MATCH",
          "AROMA_SHARED_EARTH",
          "STRUCT_BODY_ALIGN",
          "HARMONY_FLAVOR_CONVERGE"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_BODY_ALIGN\",\"STRUCT_ACIDITY_BALANCE\",\"HARMONY_COMPLEXITY_MATCH\"]",
          "compliant": true,
          "score": 0.4,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_COMPLEXITY_MATCH",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_EARTH",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_TANNIN_FAT"
            ],
            "precision": 0.4,
            "recall": 0.4
          },
          "metrics": {
            "input_tokens": 1159,
            "output_tokens": 35,
            "latency_ms": 966.4,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_BODY_ALIGN\",\"STRUCT_UMAMI_ENHANCE\"]",
          "compliant": true,
          "score": 0.4,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "expected": [
              "AROMA_SHARED_EARTH",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_TANNIN_FAT"
            ],
            "precision": 0.4,
            "recall": 0.4
          },
          "metrics": {
            "input_tokens": 1159,
            "output_tokens": 35,
            "latency_ms": 694.8,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_BODY_ALIGN\",\"STRUCT_UMAMI_ENHANCE\"]",
          "compliant": true,
          "score": 0.4,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "expected": [
              "AROMA_SHARED_EARTH",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_TANNIN_FAT"
            ],
            "precision": 0.4,
            "recall": 0.4
          },
          "metrics": {
            "input_tokens": 1159,
            "output_tokens": 35,
            "latency_ms": 646.2,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.4,
      "score_summary": {
        "mean": 0.4,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.4,
        "ci95_hi": 0.4,
        "n": 3
      },
      "score_if_valid": 0.4,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1159.0,
        "output_tokens_mean": 35.0,
        "latency_ms_mean": 769.133,
        "usd_sum": 0.00156
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-002",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "bdc5d8d74c85e9ab",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS explanation codes that best explain the success of the pairing below. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"fresh oysters on the half shell with lemon\",\n  \"beverage\": \"Chablis Premier Cru (Chardonnay, high acid, mineral, lean body)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_ACIDITY_BALANCE",
          "REFRESH_ACID",
          "REFRESH_MINERAL",
          "CONTRAST_RICH_CRISP",
          "HARMONY_REGIONAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_EARTH\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_ACID\",\n  \"REFRESH_MINERAL\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1124,
            "output_tokens": 40,
            "latency_ms": 834.2,
            "usd": 0.000514,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_EARTH\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_ACID\",\n  \"REFRESH_MINERAL\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1124,
            "output_tokens": 40,
            "latency_ms": 938.8,
            "usd": 0.000514,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"REFRESH_MINERAL\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1124,
            "output_tokens": 29,
            "latency_ms": 751.9,
            "usd": 0.000496,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6,
      "score_summary": {
        "mean": 0.6,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6,
        "ci95_hi": 0.6,
        "n": 3
      },
      "score_if_valid": 0.6,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1124.0,
        "output_tokens_mean": 36.333,
        "latency_ms_mean": 841.633,
        "usd_sum": 0.001524
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-003",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "3440bcf3978a1c58",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (2 to 4 codes).\n\nINPUT:\n{\n  \"dish\": \"crispy buttermilk fried chicken\",\n  \"beverage\": \"non-vintage Brut Champagne\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "REFRESH_EFFERVESCE",
          "REFRESH_ACID",
          "CONTRAST_RICH_CRISP"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"CONTRAST_RICH_CRISP\",\"REFRESH_EFFERVESCE\",\"REFRESH_ACID\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.8571,
          "detail": {
            "predicted": [
              "CONTRAST_RICH_CRISP",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE"
            ],
            "precision": 0.75,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 26,
            "latency_ms": 792.1,
            "usd": 0.000486,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"CONTRAST_RICH_CRISP\",\"REFRESH_EFFERVESCE\",\"REFRESH_ACID\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.8571,
          "detail": {
            "predicted": [
              "CONTRAST_RICH_CRISP",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE"
            ],
            "precision": 0.75,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 26,
            "latency_ms": 642.0,
            "usd": 0.000486,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"CONTRAST_RICH_CRISP\",\n  \"REFRESH_EFFERVESCE\",\n  \"REFRESH_ACID\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.8571,
          "detail": {
            "predicted": [
              "CONTRAST_RICH_CRISP",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE"
            ],
            "precision": 0.75,
            "recall": 1.0
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 35,
            "latency_ms": 833.0,
            "usd": 0.0005,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.8571,
      "score_summary": {
        "mean": 0.8571,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.8571,
        "ci95_hi": 0.8571,
        "n": 3
      },
      "score_if_valid": 0.8571,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1111.0,
        "output_tokens_mean": 29.0,
        "latency_ms_mean": 755.7,
        "usd_sum": 0.001472
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-004",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "hard",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "b3249dafda966f70",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS explanation codes that best explain the success of the pairing below. Respond as a JSON array (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"seared foie gras on brioche (very rich, fatty, slightly sweet-savoury)\",\n  \"beverage\": \"Sauternes (botrytised sweet white, high residual sugar, high acidity)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_SWEET_OFFSET",
          "STRUCT_ACIDITY_BALANCE",
          "CONTRAST_RICH_CRISP",
          "HARMONY_REGIONAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"STRUCT_SWEET_OFFSET\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_ACID\",\n  \"CONTRAST_SWEET_ACID\",\n  \"STRUCT_ALCOHOL_WARMTH\"\n]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_ACID",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1131,
            "output_tokens": 47,
            "latency_ms": 1012.4,
            "usd": 0.000528,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"CONTRAST_SWEET_ACID\",\"STRUCT_SWEET_OFFSET\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_FAT_TANNIN\",\"REFRESH_ACID\"]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_ACID",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_FAT_TANNIN",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1131,
            "output_tokens": 35,
            "latency_ms": 863.7,
            "usd": 0.000508,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"CONTRAST_SWEET_ACID\",\"STRUCT_SWEET_OFFSET\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_TANNIN_FAT\"]",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_FAT"
            ],
            "expected": [
              "CONTRAST_RICH_CRISP",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.5,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1131,
            "output_tokens": 30,
            "latency_ms": 895.0,
            "usd": 0.0005,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.4629,
      "score_summary": {
        "mean": 0.4629,
        "stdev": 0.0321,
        "sem": 0.0185,
        "ci95_lo": 0.4266,
        "ci95_hi": 0.4993,
        "n": 3
      },
      "score_if_valid": 0.4629,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1131.0,
        "output_tokens_mean": 37.333,
        "latency_ms_mean": 923.7,
        "usd_sum": 0.001536
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-005",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "0cb2f1946943a0e2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS explanation codes that best explain why the pairing works. Respond as a JSON array (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"charcoal-grilled lamb rack with rosemary, black pepper crust, charred edges\",\n  \"beverage\": \"Northern Rhone Syrah (Cote-Rotie) — black pepper, violet, smoked meat, moderate-high tannin\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "AROMA_SHARED_SPICE",
          "AROMA_SHARED_WOOD",
          "AROMA_SHARED_HERBAL",
          "STRUCT_TANNIN_FAT",
          "STRUCT_TANNIN_PROTEIN"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_SPICE\",\n  \"AROMA_SHARED_WOOD\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"HARMONY_FLAVOR_CONVERGE\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_SPICE",
              "AROMA_SHARED_WOOD",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "AROMA_SHARED_SPICE",
              "AROMA_SHARED_WOOD",
              "STRUCT_TANNIN_FAT",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1136,
            "output_tokens": 45,
            "latency_ms": 824.6,
            "usd": 0.000526,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_SPICE\",\n  \"AROMA_SHARED_WOOD\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"HARMONY_FLAVOR_CONVERGE\",\n  \"STRUCT_ACIDITY_BALANCE\"\n]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_SPICE",
              "AROMA_SHARED_WOOD",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "AROMA_SHARED_SPICE",
              "AROMA_SHARED_WOOD",
              "STRUCT_TANNIN_FAT",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1136,
            "output_tokens": 48,
            "latency_ms": 919.5,
            "usd": 0.000531,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"AROMA_SHARED_SPICE\",\n  \"AROMA_SHARED_WOOD\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"HARMONY_FLAVOR_CONVERGE\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_SPICE",
              "AROMA_SHARED_WOOD",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "AROMA_SHARED_SPICE",
              "AROMA_SHARED_WOOD",
              "STRUCT_TANNIN_FAT",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1136,
            "output_tokens": 45,
            "latency_ms": 863.7,
            "usd": 0.000526,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6,
      "score_summary": {
        "mean": 0.6,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6,
        "ci95_hi": 0.6,
        "n": 3
      },
      "score_if_valid": 0.6,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1136.0,
        "output_tokens_mean": 46.0,
        "latency_ms_mean": 869.267,
        "usd_sum": 0.001583
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-006",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "9a46b0d141442ce3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"Pizza Margherita (tomato, mozzarella, basil)\",\n  \"beverage\": \"Chianti Classico (Sangiovese, high acid, medium tannin)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_ACIDITY_BALANCE",
          "HARMONY_REGIONAL",
          "STRUCT_INTENSITY_MATCH",
          "AROMA_SHARED_HERBAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_HERBAL\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"REFRESH_ACID\",\n  \"HARMONY_REGIONAL\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_INTENSITY_MATCH"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1127,
            "output_tokens": 45,
            "latency_ms": 839.8,
            "usd": 0.000523,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_HERBAL\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"HARMONY_REGIONAL\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_INTENSITY_MATCH"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1127,
            "output_tokens": 44,
            "latency_ms": 1127.0,
            "usd": 0.000521,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"AROMA_SHARED_FRUIT\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_TANNIN_PROTEIN\",\"HARMONY_REGIONAL\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_INTENSITY_MATCH"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1127,
            "output_tokens": 35,
            "latency_ms": 814.4,
            "usd": 0.000507,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1127.0,
        "output_tokens_mean": 41.333,
        "latency_ms_mean": 927.067,
        "usd_sum": 0.001551
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-007",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "hard",
      "cuisine_region": "chinese",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "497adc64f3303eb4",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"mapo tofu with Sichuan peppercorn\",\n  \"beverage\": \"off-dry German Spätlese Riesling\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_SWEET_OFFSET",
          "REFRESH_ACID",
          "STRUCT_INTENSITY_MATCH",
          "CONTRAST_SWEET_SALTY"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_SPICE\",\n  \"STRUCT_SWEET_OFFSET\",\n  \"REFRESH_ACID\",\n  \"CONTRAST_SWEET_ACID\"\n]",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": [
              "AROMA_SHARED_SPICE",
              "CONTRAST_SWEET_ACID",
              "REFRESH_ACID",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "REFRESH_ACID",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.5,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1118,
            "output_tokens": 36,
            "latency_ms": 722.2,
            "usd": 0.000505,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"CONTRAST_SWEET_ACID\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"STRUCT_SWEET_OFFSET\",\n  \"REFRESH_ACID\",\n  \"STRUCT_UMAMI_ENHANCE\"\n]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_ACID",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "REFRESH_ACID",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1118,
            "output_tokens": 47,
            "latency_ms": 899.1,
            "usd": 0.000522,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"CONTRAST_SWEET_ACID\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"STRUCT_SWEET_OFFSET\",\"HARMONY_COMPLEXITY_MATCH\"]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_ACID",
              "HARMONY_COMPLEXITY_MATCH",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "REFRESH_ACID",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1118,
            "output_tokens": 36,
            "latency_ms": 863.0,
            "usd": 0.000505,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.4629,
      "score_summary": {
        "mean": 0.4629,
        "stdev": 0.0321,
        "sem": 0.0185,
        "ci95_lo": 0.4266,
        "ci95_hi": 0.4993,
        "n": 3
      },
      "score_if_valid": 0.4629,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1118.0,
        "output_tokens_mean": 39.667,
        "latency_ms_mean": 828.1,
        "usd_sum": 0.001532
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-008",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "2120f96a0444a50e",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"herb-crusted rack of lamb, medium-rare\",\n  \"beverage\": \"Northern Rhône Syrah\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_TANNIN_PROTEIN",
          "AROMA_SHARED_HERBAL",
          "STRUCT_BODY_ALIGN",
          "HARMONY_FLAVOR_CONVERGE"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ALCOHOL_WARMTH\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "AROMA_SHARED_HERBAL",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1115,
            "output_tokens": 35,
            "latency_ms": 833.4,
            "usd": 0.000502,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_BODY_ALIGN\",\"HARMONY_REGIONAL\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1115,
            "output_tokens": 33,
            "latency_ms": 715.8,
            "usd": 0.000499,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"AROMA_SHARED_WOOD\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ALCOHOL_WARMTH\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "AROMA_SHARED_WOOD",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1115,
            "output_tokens": 34,
            "latency_ms": 838.8,
            "usd": 0.0005,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1115.0,
        "output_tokens_mean": 34.0,
        "latency_ms_mean": 796.0,
        "usd_sum": 0.001501
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-009",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "hard",
      "cuisine_region": "thai",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "c077c498ceed5f73",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"Thai green curry with chicken and Thai basil\",\n  \"beverage\": \"off-dry New Zealand Sauvignon Blanc\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_SWEET_OFFSET",
          "AROMA_SHARED_HERBAL",
          "REFRESH_ACID",
          "STRUCT_INTENSITY_MATCH"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_HERBAL\",\n  \"CONTRAST_SWEET_ACID\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_ACID\",\n  \"STRUCT_SWEET_OFFSET\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "CONTRAST_SWEET_ACID",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "REFRESH_ACID",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1115,
            "output_tokens": 46,
            "latency_ms": 844.0,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"AROMA_SHARED_FRUIT\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"STRUCT_SWEET_OFFSET\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "AROMA_SHARED_HERBAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "REFRESH_ACID",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1115,
            "output_tokens": 32,
            "latency_ms": 667.9,
            "usd": 0.000497,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"CONTRAST_SWEET_ACID\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"STRUCT_SWEET_OFFSET\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "CONTRAST_SWEET_ACID",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "REFRESH_ACID",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1115,
            "output_tokens": 35,
            "latency_ms": 765.6,
            "usd": 0.000502,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1115.0,
        "output_tokens_mean": 37.667,
        "latency_ms_mean": 759.167,
        "usd_sum": 0.001519
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-010",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "956c6ce654c2e297",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"moules marinières\",\n  \"beverage\": \"Muscadet sur lie\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_ACIDITY_BALANCE",
          "REFRESH_ACID",
          "AROMA_SHARED_HERBAL",
          "HARMONY_REGIONAL",
          "REFRESH_MINERAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"HARMONY_REGIONAL\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"STRUCT_INTENSITY_MATCH\"]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_INTENSITY_MATCH"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 32,
            "latency_ms": 779.1,
            "usd": 0.000496,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_EARTH\",\n  \"HARMONY_REGIONAL\",\n  \"REFRESH_ACID\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_MINERAL\"\n]",
          "compliant": true,
          "score": 0.8,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "precision": 0.8,
            "recall": 0.8
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 42,
            "latency_ms": 850.2,
            "usd": 0.000512,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"HARMONY_REGIONAL\",\"STRUCT_INTENSITY_MATCH\"]",
          "compliant": true,
          "score": 0.6,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_INTENSITY_MATCH"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "precision": 0.6,
            "recall": 0.6
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 32,
            "latency_ms": 661.8,
            "usd": 0.000496,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.1155,
        "sem": 0.0667,
        "ci95_lo": 0.536,
        "ci95_hi": 0.7973,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1111.0,
        "output_tokens_mean": 35.333,
        "latency_ms_mean": 763.7,
        "usd_sum": 0.001504
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-011",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "hard",
      "cuisine_region": "mexican",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "1dfeb782059a4814",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"tacos al pastor with pineapple and chili\",\n  \"beverage\": \"dry sparkling rosé (Cava rosado)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_SWEET_OFFSET",
          "REFRESH_EFFERVESCE",
          "AROMA_SHARED_FRUIT",
          "CONTRAST_RICH_CRISP"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_FRUIT\",\n  \"CONTRAST_SWEET_ACID\",\n  \"REFRESH_EFFERVESCE\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"HARMONY_FLAVOR_CONVERGE\"\n]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_SWEET_ACID",
              "HARMONY_FLAVOR_CONVERGE",
              "REFRESH_EFFERVESCE",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_RICH_CRISP",
              "REFRESH_EFFERVESCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1117,
            "output_tokens": 51,
            "latency_ms": 1633.4,
            "usd": 0.000528,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_FRUIT\",\"AROMA_SHARED_SPICE\",\"CONTRAST_SWEET_ACID\",\"REFRESH_EFFERVESCE\",\"STRUCT_ACIDITY_BALANCE\"]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "AROMA_SHARED_SPICE",
              "CONTRAST_SWEET_ACID",
              "REFRESH_EFFERVESCE",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_RICH_CRISP",
              "REFRESH_EFFERVESCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1117,
            "output_tokens": 37,
            "latency_ms": 779.7,
            "usd": 0.000506,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"AROMA_SHARED_FRUIT\",\n  \"CONTRAST_SWEET_ACID\",\n  \"REFRESH_EFFERVESCE\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"HARMONY_REGIONAL\"\n]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_SWEET_ACID",
              "HARMONY_REGIONAL",
              "REFRESH_EFFERVESCE",
              "STRUCT_ACIDITY_BALANCE"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_RICH_CRISP",
              "REFRESH_EFFERVESCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1117,
            "output_tokens": 48,
            "latency_ms": 1031.5,
            "usd": 0.000524,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.4444,
      "score_summary": {
        "mean": 0.4444,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.4444,
        "ci95_hi": 0.4444,
        "n": 3
      },
      "score_if_valid": 0.4444,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1117.0,
        "output_tokens_mean": 45.333,
        "latency_ms_mean": 1148.2,
        "usd_sum": 0.001558
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-012",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "28df36c8ae155697",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"duck confit, crispy skin\",\n  \"beverage\": \"mature Burgundy Pinot Noir (10 years)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_TANNIN_FAT",
          "AROMA_SHARED_EARTH",
          "HARMONY_REGIONAL",
          "HARMONY_COMPLEXITY_MATCH"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_WOOD\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"STRUCT_ALCOHOL_WARMTH\",\n  \"STRUCT_BODY_ALIGN\",\n  \"STRUCT_INTENSITY_MATCH\"\n]",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "AROMA_SHARED_WOOD",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_EARTH",
              "HARMONY_COMPLEXITY_MATCH",
              "HARMONY_REGIONAL",
              "STRUCT_TANNIN_FAT"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 1114,
            "output_tokens": 44,
            "latency_ms": 978.8,
            "usd": 0.000516,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_WOOD\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_BODY_ALIGN\",\"HARMONY_FLAVOR_CONVERGE\"]",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "AROMA_SHARED_WOOD",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_EARTH",
              "HARMONY_COMPLEXITY_MATCH",
              "HARMONY_REGIONAL",
              "STRUCT_TANNIN_FAT"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 1114,
            "output_tokens": 35,
            "latency_ms": 835.8,
            "usd": 0.000502,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"AROMA_SHARED_FRUIT\",\n  \"AROMA_SHARED_EARTH\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"STRUCT_ALCOHOL_WARMTH\",\n  \"STRUCT_BODY_ALIGN\"\n]",
          "compliant": true,
          "score": 0.2222,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "AROMA_SHARED_FRUIT",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_EARTH",
              "HARMONY_COMPLEXITY_MATCH",
              "HARMONY_REGIONAL",
              "STRUCT_TANNIN_FAT"
            ],
            "precision": 0.2,
            "recall": 0.25
          },
          "metrics": {
            "input_tokens": 1114,
            "output_tokens": 45,
            "latency_ms": 916.1,
            "usd": 0.000518,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0741,
      "score_summary": {
        "mean": 0.0741,
        "stdev": 0.1283,
        "sem": 0.0741,
        "ci95_lo": -0.0711,
        "ci95_hi": 0.2192,
        "n": 3
      },
      "score_if_valid": 0.0741,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1114.0,
        "output_tokens_mean": 41.333,
        "latency_ms_mean": 910.233,
        "usd_sum": 0.001536
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-013",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "spanish",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "18a20e7e30844c41",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"jamón ibérico de bellota, sliced thin\",\n  \"beverage\": \"chilled Fino sherry (Manzanilla)\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_UMAMI_ENHANCE",
          "HARMONY_REGIONAL",
          "AROMA_SHARED_SAVORY",
          "REFRESH_MINERAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"STRUCT_SWEET_OFFSET\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_TANNIN_PROTEIN\",\"REFRESH_ACID\",\"HARMONY_REGIONAL\"]",
          "compliant": true,
          "score": 0.2222,
          "detail": {
            "predicted": [
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_SAVORY",
              "HARMONY_REGIONAL",
              "REFRESH_MINERAL",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "precision": 0.2,
            "recall": 0.25
          },
          "metrics": {
            "input_tokens": 1120,
            "output_tokens": 33,
            "latency_ms": 801.6,
            "usd": 0.000501,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_EARTH\",\n  \"STRUCT_SWEET_OFFSET\",\n  \"REFRESH_ACID\",\n  \"STRUCT_ALCOHOL_WARMTH\",\n  \"REFRESH_EFFERVESCE\"\n]",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "REFRESH_ACID",
              "REFRESH_EFFERVESCE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_SAVORY",
              "HARMONY_REGIONAL",
              "REFRESH_MINERAL",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 1120,
            "output_tokens": 46,
            "latency_ms": 959.3,
            "usd": 0.000522,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"AROMA_SHARED_SALTY\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"STRUCT_SWEET_OFFSET\",\n  \"REFRESH_ACID\",\n  \"STRUCT_ALCOHOL_WARMTH\"\n]",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "AROMA_SHARED_SALTY",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_SAVORY",
              "HARMONY_REGIONAL",
              "REFRESH_MINERAL",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 1120,
            "output_tokens": 45,
            "latency_ms": 795.8,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0741,
      "score_summary": {
        "mean": 0.0741,
        "stdev": 0.1283,
        "sem": 0.0741,
        "ci95_lo": -0.0711,
        "ci95_hi": 0.2192,
        "n": 3
      },
      "score_if_valid": 0.0741,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1120.0,
        "output_tokens_mean": 41.333,
        "latency_ms_mean": 852.233,
        "usd_sum": 0.001543
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-014",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "korean",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "f9be4ef2853d67b3",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"bulgogi: soy-pear-garlic marinated grilled beef\",\n  \"beverage\": \"medium-bodied unoaked Merlot\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_TANNIN_PROTEIN",
          "STRUCT_SWEET_OFFSET",
          "AROMA_SHARED_FRUIT",
          "STRUCT_BODY_ALIGN"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_FRUIT\",\n  \"AROMA_SHARED_SPICE\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"STRUCT_BODY_ALIGN\",\n  \"HARMONY_FLAVOR_CONVERGE\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "AROMA_SHARED_SPICE",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1120,
            "output_tokens": 45,
            "latency_ms": 904.4,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_FRUIT\",\"AROMA_SHARED_SPICE\",\"STRUCT_BODY_ALIGN\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ALCOHOL_WARMTH\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "AROMA_SHARED_SPICE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1120,
            "output_tokens": 33,
            "latency_ms": 1182.3,
            "usd": 0.000501,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"HARMONY_FLAVOR_CONVERGE\",\"STRUCT_BODY_ALIGN\",\"STRUCT_TANNIN_PROTEIN\",\"AROMA_SHARED_FRUIT\",\"AROMA_SHARED_SPICE\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "AROMA_SHARED_SPICE",
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1120,
            "output_tokens": 34,
            "latency_ms": 876.6,
            "usd": 0.000502,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1120.0,
        "output_tokens_mean": 37.333,
        "latency_ms_mean": 987.767,
        "usd_sum": 0.001523
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-015",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "easy",
      "cuisine_region": "british",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "5474f731e5d9751c",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"Stilton blue cheese\",\n  \"beverage\": \"Vintage Port\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "CONTRAST_SWEET_SALTY",
          "STRUCT_INTENSITY_MATCH",
          "HARMONY_COMPLEXITY_MATCH",
          "STRUCT_SWEET_OFFSET"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"STRUCT_SWEET_OFFSET\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_BODY_ALIGN\",\"HARMONY_FLAVOR_CONVERGE\",\"STRUCT_ALCOHOL_WARMTH\"]",
          "compliant": true,
          "score": 0.2222,
          "detail": {
            "predicted": [
              "HARMONY_FLAVOR_CONVERGE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_COMPLEXITY_MATCH",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.2,
            "recall": 0.25
          },
          "metrics": {
            "input_tokens": 1107,
            "output_tokens": 36,
            "latency_ms": 850.6,
            "usd": 0.0005,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"CONTRAST_SWEET_SALTY\",\"STRUCT_TANNIN_FAT\",\"STRUCT_SWEET_OFFSET\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_SALTY",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_FAT"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_COMPLEXITY_MATCH",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.5,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1107,
            "output_tokens": 28,
            "latency_ms": 676.5,
            "usd": 0.000488,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"CONTRAST_SWEET_SALTY\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_SWEET_OFFSET\",\"HARMONY_COMPLEXITY_MATCH\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_COMPLEXITY_MATCH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_COMPLEXITY_MATCH",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1107,
            "output_tokens": 37,
            "latency_ms": 690.6,
            "usd": 0.000502,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.463,
      "score_summary": {
        "mean": 0.463,
        "stdev": 0.2246,
        "sem": 0.1296,
        "ci95_lo": 0.2089,
        "ci95_hi": 0.7171,
        "n": 3
      },
      "score_if_valid": 0.463,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1107.0,
        "output_tokens_mean": 33.667,
        "latency_ms_mean": 739.233,
        "usd_sum": 0.00149
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-016",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "hard",
      "cuisine_region": "japanese",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "fc7f227aebed6756",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"shrimp tempura, light fry\",\n  \"beverage\": \"chilled junmai ginjo sake\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_UMAMI_ENHANCE",
          "REFRESH_MINERAL",
          "AROMA_SHARED_FLORAL",
          "STRUCT_INTENSITY_MATCH",
          "HARMONY_REGIONAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_EARTH\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_ACID\",\n  \"STRUCT_BODY_ALIGN\",\n  \"HARMONY_TEXTURE_MATCH\"\n]",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_TEXTURE_MATCH",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "AROMA_SHARED_FLORAL",
              "HARMONY_REGIONAL",
              "REFRESH_MINERAL",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 1114,
            "output_tokens": 41,
            "latency_ms": 903.5,
            "usd": 0.000511,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\"AROMA_SHARED_FRUIT\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"HARMONY_COMPLEXITY_MATCH\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.0,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "HARMONY_COMPLEXITY_MATCH",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "AROMA_SHARED_FLORAL",
              "HARMONY_REGIONAL",
              "REFRESH_MINERAL",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "precision": 0.0,
            "recall": 0.0
          },
          "metrics": {
            "input_tokens": 1114,
            "output_tokens": 31,
            "latency_ms": 750.4,
            "usd": 0.000495,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_FRUIT\",\"STRUCT_ACIDITY_BALANCE\",\"REFRESH_ACID\",\"STRUCT_UMAMI_ENHANCE\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.2,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "expected": [
              "AROMA_SHARED_FLORAL",
              "HARMONY_REGIONAL",
              "REFRESH_MINERAL",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "precision": 0.2,
            "recall": 0.2
          },
          "metrics": {
            "input_tokens": 1114,
            "output_tokens": 31,
            "latency_ms": 744.1,
            "usd": 0.000495,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.0667,
      "score_summary": {
        "mean": 0.0667,
        "stdev": 0.1155,
        "sem": 0.0667,
        "ci95_lo": -0.064,
        "ci95_hi": 0.1973,
        "n": 3
      },
      "score_if_valid": 0.0667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1114.0,
        "output_tokens_mean": 34.333,
        "latency_ms_mean": 799.333,
        "usd_sum": 0.001501
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-017",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "greek",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "024198497e327bb1",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"horiatiki salad with feta, tomato, olive, oregano\",\n  \"beverage\": \"Santorini Assyrtiko\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "REFRESH_ACID",
          "REFRESH_MINERAL",
          "HARMONY_REGIONAL",
          "AROMA_SHARED_HERBAL"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\n  \"AROMA_SHARED_HERBAL\",\n  \"REFRESH_ACID\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"HARMONY_REGIONAL\",\n  \"STRUCT_UMAMI_ENHANCE\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1119,
            "output_tokens": 45,
            "latency_ms": 1100.8,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_HERBAL\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"HARMONY_REGIONAL\",\n  \"STRUCT_UMAMI_ENHANCE\",\n  \"REFRESH_ACID\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_UMAMI_ENHANCE"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1119,
            "output_tokens": 45,
            "latency_ms": 906.9,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_HERBAL\",\"STRUCT_ACIDITY_BALANCE\",\"HARMONY_REGIONAL\",\"REFRESH_ACID\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN"
            ],
            "expected": [
              "AROMA_SHARED_HERBAL",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "REFRESH_MINERAL"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1119,
            "output_tokens": 30,
            "latency_ms": 869.2,
            "usd": 0.000496,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.6667,
      "score_summary": {
        "mean": 0.6667,
        "stdev": 0.0,
        "sem": 0.0,
        "ci95_lo": 0.6667,
        "ci95_hi": 0.6667,
        "n": 3
      },
      "score_if_valid": 0.6667,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1119.0,
        "output_tokens_mean": 40.0,
        "latency_ms_mean": 958.967,
        "usd_sum": 0.001536
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-018",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "hard",
      "cuisine_region": "chinese",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "283bbb9e703e6e30",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"Peking duck with pancake, scallion, hoisin\",\n  \"beverage\": \"10-year-old off-dry Mosel Riesling\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_SWEET_OFFSET",
          "AROMA_SHARED_FRUIT",
          "REFRESH_ACID",
          "HARMONY_COMPLEXITY_MATCH"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"AROMA_SHARED_FRUIT\",\"CONTRAST_SWEET_ACID\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_SWEET_OFFSET\",\"STRUCT_BODY_ALIGN\"]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_SWEET_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "HARMONY_COMPLEXITY_MATCH",
              "REFRESH_ACID",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1121,
            "output_tokens": 33,
            "latency_ms": 740.7,
            "usd": 0.000501,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"CONTRAST_SWEET_ACID\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"STRUCT_SWEET_OFFSET\",\n  \"STRUCT_ALCOHOL_WARMTH\",\n  \"AROMA_SHARED_FRUIT\"\n]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "CONTRAST_SWEET_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "HARMONY_COMPLEXITY_MATCH",
              "REFRESH_ACID",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1121,
            "output_tokens": 48,
            "latency_ms": 904.2,
            "usd": 0.000525,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\n  \"AROMA_SHARED_FRUIT\",\n  \"STRUCT_SWEET_OFFSET\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"REFRESH_ACID\",\n  \"HARMONY_REGIONAL\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_FRUIT",
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_SWEET_OFFSET"
            ],
            "expected": [
              "AROMA_SHARED_FRUIT",
              "HARMONY_COMPLEXITY_MATCH",
              "REFRESH_ACID",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1121,
            "output_tokens": 42,
            "latency_ms": 902.7,
            "usd": 0.000516,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.5185,
      "score_summary": {
        "mean": 0.5185,
        "stdev": 0.1283,
        "sem": 0.0741,
        "ci95_lo": 0.3733,
        "ci95_hi": 0.6637,
        "n": 3
      },
      "score_if_valid": 0.5185,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1121.0,
        "output_tokens_mean": 41.0,
        "latency_ms_mean": 849.2,
        "usd_sum": 0.001542
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-019",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "medium",
      "cuisine_region": "italian",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "9ea6cd79384f9ee2",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"Sunday ragù napoletano (long-cooked beef and pork)\",\n  \"beverage\": \"Campanian Aglianico, 5 years\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_TANNIN_PROTEIN",
          "HARMONY_REGIONAL",
          "STRUCT_BODY_ALIGN",
          "AROMA_SHARED_WOOD"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_ACIDITY_BALANCE\",\"STRUCT_ALCOHOL_WARMTH\",\"HARMONY_REGIONAL\"]",
          "compliant": true,
          "score": 0.4444,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_WOOD",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.4,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1124,
            "output_tokens": 37,
            "latency_ms": 931.0,
            "usd": 0.000509,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"AROMA_SHARED_EARTH\",\n  \"STRUCT_TANNIN_PROTEIN\",\n  \"STRUCT_ACIDITY_BALANCE\",\n  \"STRUCT_BODY_ALIGN\",\n  \"HARMONY_REGIONAL\"\n]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "HARMONY_REGIONAL",
              "STRUCT_ACIDITY_BALANCE",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_WOOD",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1124,
            "output_tokens": 44,
            "latency_ms": 947.7,
            "usd": 0.00052,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"AROMA_SHARED_EARTH\",\"STRUCT_TANNIN_PROTEIN\",\"STRUCT_BODY_ALIGN\",\"STRUCT_ALCOHOL_WARMTH\"]",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": [
              "AROMA_SHARED_EARTH",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "expected": [
              "AROMA_SHARED_WOOD",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_TANNIN_PROTEIN"
            ],
            "precision": 0.5,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1124,
            "output_tokens": 28,
            "latency_ms": 702.9,
            "usd": 0.000494,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.537,
      "score_summary": {
        "mean": 0.537,
        "stdev": 0.1157,
        "sem": 0.0668,
        "ci95_lo": 0.4061,
        "ci95_hi": 0.6679,
        "n": 3
      },
      "score_if_valid": 0.537,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1124.0,
        "output_tokens_mean": 36.333,
        "latency_ms_mean": 860.533,
        "usd_sum": 0.001523
      }
    },
    {
      "task_id": "ogs:core:benchmark-task:xplain-020",
      "category": "explanation",
      "capability": "explanation_code_selection",
      "difficulty": "easy",
      "cuisine_region": "french",
      "scorer": "set_f1",
      "context_needs": [
        "vocab.explanation_codes.positive"
      ],
      "prompt_template_hash": "43005f44c870684b",
      "prompt": "You are a gastronomy expert evaluated by the Open Gastronomy Standard (OGS) Benchmark.\n\nYou will be given:\n- The relevant slices of the OGS specification (controlled vocabularies, scales,\n  JSON schemas) needed to answer the task correctly.\n- A task instruction, an input payload, and an answer format.\n\nFollow these rules:\n- Use ONLY the OGS controlled vocabularies supplied below. Do not invent new\n  enum values, tag names, or code IDs.\n- When the task asks for JSON, respond with a single JSON document and nothing\n  else. Do NOT wrap JSON in markdown code fences.\n- When the task asks for a single token (e.g. 'a', '3', 'fruit'), respond with\n  just the token and no surrounding prose.\n- Intensities use the OGS 0-10 scale unless otherwise stated\n  (0 = not detectable, 10 = overwhelming).\n- If the task supplies a list of candidates, your answer must come from that\n  list exactly.\n\n## OGS Context\n\n### OGS Explanation Codes (positive only)\n\nEach code has a fixed `id` (UPPER_SNAKE_CASE). 
Use the exact `id` string.\n\n**aromatic_bridge**\n- `AROMA_SHARED_FRUIT` — Shared Fruit Aromatics: Both dish and beverage share fruit aromatic families, creating an aromatic bridge.\n- `AROMA_SHARED_SPICE` — Shared Spice Notes: Spice aromatics appear in both dish and beverage, linking them aromatically.\n- `AROMA_SHARED_EARTH` — Shared Earth Notes: Earthy, mineral, or fungal aromatics bridge the dish and beverage.\n- `AROMA_SHARED_HERBAL` — Shared Herbal Notes: Herbal aromatics present in both dish and beverage create continuity.\n- `AROMA_SHARED_FLORAL` — Shared Floral Notes: Floral aromatics appear in both elements, creating an elegant aromatic link.\n- `AROMA_SHARED_WOOD` — Shared Wood/Oak Notes: Wood, smoke, or oak aromatics bridge dish and beverage (e.g., grilled meat with oaked wine).\n\n**contrast**\n- `CONTRAST_SWEET_ACID` — Sweet-Acid Contrast: Sweetness in one element contrasts with acidity in the other, creating dynamic tension.\n- `CONTRAST_RICH_CRISP` — Rich-Crisp Contrast: A rich, heavy dish is contrasted by a crisp, lean beverage (or vice versa).\n- `CONTRAST_SWEET_SALTY` — Sweet-Salty Contrast: Sweetness in the beverage contrasts with saltiness in the dish, creating interest.\n- `CONTRAST_TEMP_TEXTURE` — Temperature-Texture Contrast: Temperature or textural differences between dish and beverage create pleasant contrast.\n\n**harmony**\n- `HARMONY_FLAVOR_CONVERGE` — Flavor Profile Convergence: The overall flavor profiles of dish and beverage converge, creating a unified taste experience.\n- `HARMONY_TEXTURE_MATCH` — Textural Harmony: The textures of dish and beverage complement each other harmoniously.\n- `HARMONY_REGIONAL` — Regional Affinity: Dish and beverage share a regional origin, reflecting terroir-driven compatibility.\n- `HARMONY_COMPLEXITY_MATCH` — Complexity Match: Both dish and beverage have similar complexity levels, so neither outshines the other.\n\n**positive_structural**\n- `STRUCT_INTENSITY_MATCH` — Intensity Match: The overall 
flavor intensity of the dish and beverage are well-aligned, so neither overpowers the other.\n- `STRUCT_BODY_ALIGN` — Body Alignment: The body/weight of the dish matches the body of the beverage, creating a balanced mouthfeel.\n- `STRUCT_ACIDITY_BALANCE` — Acidity Balance: The beverage's acidity cuts through richness or matches the dish's acidity, creating freshness.\n- `STRUCT_TANNIN_FAT` — Tannin-Fat Synergy: Tannins in the beverage are softened by fat in the dish, creating a smooth, integrated sensation.\n- `STRUCT_TANNIN_PROTEIN` — Tannin-Protein Synergy: Proteins in the dish bind with tannins, reducing astringency and enhancing mouthfeel.\n- `STRUCT_SWEET_OFFSET` — Sweetness Offset: Residual sweetness in the beverage balances salt, spice, or acidity in the dish.\n- `STRUCT_UMAMI_ENHANCE` — Umami Enhancement: Umami in the dish enhances the perceived fruit and body of the beverage.\n- `STRUCT_ALCOHOL_WARMTH` — Alcohol Warmth Complement: The warmth of alcohol complements rich, hearty dishes without overpowering.\n\n**refresh**\n- `REFRESH_EFFERVESCE` — Effervescence Cleanse: Carbonation in the beverage physically scrubs the palate between bites of rich food.\n- `REFRESH_ACID` — Acid Refresh: High acidity in the beverage refreshes the palate, cutting through fat and richness.\n- `REFRESH_MINERAL` — Mineral Reset: Mineral character in the beverage provides a clean, palate-resetting effect.\n\n---\n\nReturn the OGS positive explanation codes that best explain why the pairing below works. Respond as a JSON array of code IDs (3 to 5 codes).\n\nINPUT:\n{\n  \"dish\": \"foie gras torchon with brioche\",\n  \"beverage\": \"Sauternes\"\n}\n\nANSWER FORMAT: set_of_codes",
      "expected": {
        "values": [
          "STRUCT_SWEET_OFFSET",
          "CONTRAST_SWEET_SALTY",
          "HARMONY_REGIONAL",
          "STRUCT_BODY_ALIGN"
        ]
      },
      "samples": [
        {
          "sample": 0,
          "raw_response": "[\"STRUCT_SWEET_OFFSET\",\"STRUCT_TANNIN_FAT\",\"STRUCT_BODY_ALIGN\",\"REFRESH_ACID\",\"HARMONY_REGIONAL\"]",
          "compliant": true,
          "score": 0.6667,
          "detail": {
            "predicted": [
              "HARMONY_REGIONAL",
              "REFRESH_ACID",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_FAT"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.6,
            "recall": 0.75
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 29,
            "latency_ms": 689.9,
            "usd": 0.000491,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 1,
          "raw_response": "[\n  \"STRUCT_SWEET_OFFSET\",\n  \"STRUCT_TANNIN_FAT\",\n  \"HARMONY_REGIONAL\",\n  \"STRUCT_ALCOHOL_WARMTH\"\n]",
          "compliant": true,
          "score": 0.5,
          "detail": {
            "predicted": [
              "HARMONY_REGIONAL",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_FAT"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.5,
            "recall": 0.5
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 37,
            "latency_ms": 892.4,
            "usd": 0.000504,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        },
        {
          "sample": 2,
          "raw_response": "[\"STRUCT_SWEET_OFFSET\",\"STRUCT_TANNIN_FAT\",\"STRUCT_ALCOHOL_WARMTH\",\"STRUCT_INTENSITY_MATCH\",\"HARMONY_COMPLEXITY_MATCH\"]",
          "compliant": true,
          "score": 0.2222,
          "detail": {
            "predicted": [
              "HARMONY_COMPLEXITY_MATCH",
              "STRUCT_ALCOHOL_WARMTH",
              "STRUCT_INTENSITY_MATCH",
              "STRUCT_SWEET_OFFSET",
              "STRUCT_TANNIN_FAT"
            ],
            "expected": [
              "CONTRAST_SWEET_SALTY",
              "HARMONY_REGIONAL",
              "STRUCT_BODY_ALIGN",
              "STRUCT_SWEET_OFFSET"
            ],
            "precision": 0.2,
            "recall": 0.25
          },
          "metrics": {
            "input_tokens": 1111,
            "output_tokens": 36,
            "latency_ms": 595.1,
            "usd": 0.000502,
            "model_version": "gpt-4.1-mini-2025-04-14",
            "finish_reason": "stop"
          }
        }
      ],
      "score": 0.463,
      "score_summary": {
        "mean": 0.463,
        "stdev": 0.2246,
        "sem": 0.1296,
        "ci95_lo": 0.2089,
        "ci95_hi": 0.7171,
        "n": 3
      },
      "score_if_valid": 0.463,
      "format_rate": 1.0,
      "metrics": {
        "input_tokens_mean": 1111.0,
        "output_tokens_mean": 34.0,
        "latency_ms_mean": 725.8,
        "usd_sum": 0.001497
      }
    }
  ],
  "category_scores": {
    "composition": {
      "mean_score": 0.8045,
      "ci95_lo": 0.6381,
      "ci95_hi": 0.9709,
      "stdev": 0.3797,
      "task_count": 20,
      "weight": 0.12,
      "format_rate": 1.0,
      "score_if_valid": 0.8045
    },
    "dietary": {
      "mean_score": 0.5782,
      "ci95_lo": 0.3481,
      "ci95_hi": 0.8084,
      "stdev": 0.4067,
      "task_count": 12,
      "weight": 0.06,
      "format_rate": 1.0,
      "score_if_valid": 0.5782
    },
    "explanation": {
      "mean_score": 0.5012,
      "ci95_lo": 0.4066,
      "ci95_hi": 0.5958,
      "stdev": 0.2158,
      "task_count": 20,
      "weight": 0.1,
      "format_rate": 1.0,
      "score_if_valid": 0.5012
    },
    "menu_engineering": {
      "mean_score": 0.9167,
      "ci95_lo": 0.7533,
      "ci95_hi": 1.08,
      "stdev": 0.2357,
      "task_count": 8,
      "weight": 0.03,
      "format_rate": 1.0,
      "score_if_valid": 0.9167
    },
    "nutrition": {
      "mean_score": 0.377,
      "ci95_lo": 0.1087,
      "ci95_hi": 0.6452,
      "stdev": 0.4327,
      "task_count": 10,
      "weight": 0.03,
      "format_rate": 1.0,
      "score_if_valid": 0.377
    },
    "pairing": {
      "mean_score": 1.0,
      "ci95_lo": 1.0,
      "ci95_hi": 1.0,
      "stdev": 0.0,
      "task_count": 22,
      "weight": 0.15,
      "format_rate": 1.0,
      "score_if_valid": 1.0
    },
    "regional": {
      "mean_score": 0.7917,
      "ci95_lo": 0.6455,
      "ci95_hi": 0.9378,
      "stdev": 0.3653,
      "task_count": 24,
      "weight": 0.05,
      "format_rate": 1.0,
      "score_if_valid": 0.7917
    },
    "risk": {
      "mean_score": 0.9,
      "ci95_lo": 0.7651,
      "ci95_hi": 1.0349,
      "stdev": 0.3078,
      "task_count": 20,
      "weight": 0.08,
      "format_rate": 1.0,
      "score_if_valid": 0.9
    },
    "safety": {
      "mean_score": 0.6328,
      "ci95_lo": 0.3606,
      "ci95_hi": 0.9051,
      "stdev": 0.4812,
      "task_count": 12,
      "weight": 0.06,
      "format_rate": 1.0,
      "score_if_valid": 0.6328
    },
    "sensory": {
      "mean_score": 0.5956,
      "ci95_lo": 0.4704,
      "ci95_hi": 0.7209,
      "stdev": 0.2928,
      "task_count": 21,
      "weight": 0.12,
      "format_rate": 1.0,
      "score_if_valid": 0.5956
    },
    "structured_output": {
      "mean_score": 0.8667,
      "ci95_lo": 0.7565,
      "ci95_hi": 0.9768,
      "stdev": 0.2513,
      "task_count": 20,
      "weight": 0.12,
      "format_rate": 1.0,
      "score_if_valid": 0.8667
    },
    "vocabulary": {
      "mean_score": 0.8551,
      "ci95_lo": 0.733,
      "ci95_hi": 0.9771,
      "stdev": 0.2986,
      "task_count": 23,
      "weight": 0.08,
      "format_rate": 1.0,
      "score_if_valid": 0.8551
    }
  },
  "aggregate": {
    "score": 76.36,
    "ci95_lo": 72.24,
    "ci95_hi": 80.48,
    "scale": "0-100",
    "partial": false,
    "missing_categories": []
  },
  "metrics": {
    "latency_ms_mean": 1162.669,
    "latency_ms_median": 634.067,
    "input_tokens_mean": 729.8,
    "output_tokens_mean": 53.5,
    "usd_total": 0.241283,
    "tasks_with_usd": 213
  },
  "weights": {
    "vocabulary": 0.08,
    "sensory": 0.12,
    "composition": 0.12,
    "pairing": 0.15,
    "explanation": 0.1,
    "risk": 0.08,
    "structured_output": 0.12,
    "regional": 0.05,
    "dietary": 0.06,
    "safety": 0.06,
    "nutrition": 0.03,
    "menu_engineering": 0.03
  },
  "judge": {
    "judge_model": "gpt-5.4",
    "judge_prompt_hash": "756db6a487714bca",
    "judge_validated": false,
    "excluded_task_ids": [
      "ogs:core:benchmark-task:menu-009"
    ],
    "note": "Rubric-judge tasks are scored and retained in `tasks`, but excluded from `aggregate` until the judge clears the human-validation protocol (r > 0.8 against a 50-task sample)."
  }
}