Dataset Preview
The full dataset viewer is not available; only a preview of the rows is shown.
The dataset generation failed
Error code: DatasetGenerationError
Exception: ArrowNotImplementedError
Message: Cannot write struct type 'config' with no child field to Parquet. Consider adding a dummy child field.
Traceback:
Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
    writer.write_table(table)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 620, in write_table
    self._build_writer(inferred_schema=pa_table.schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
    self.pa_writer = self._WRITER_CLASS(self.stream, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
    self.writer = _parquet.ParquetWriter(
  File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
  File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'config' with no child field to Parquet. Consider adding a dummy child field.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1886, in _prepare_split_single
    num_examples, num_bytes = writer.finalize()
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 639, in finalize
    self._build_writer(self.schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
    self.pa_writer = self._WRITER_CLASS(self.stream, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
    self.writer = _parquet.ParquetWriter(
  File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
  File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'config' with no child field to Parquet. Consider adding a dummy child field.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1420, in compute_config_parquet_and_info_response
    parquet_operations = convert_to_parquet(builder)
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1052, in convert_to_parquet
    builder.download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
    self._download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
    for job_id, done, content in self._prepare_split_single(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single
    raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
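The error points at the `config` column: every preview row stores an empty object `{}` there, and the Parquet writer cannot serialize a struct type with no child fields, which is why only the viewer's Parquet conversion step fails. Below is a minimal sketch of how a dataset owner might work around this with the `datasets` library; it is an assumption-laden illustration, not the repository's actual fix. The repo id `your-org/leaderboard-spaces` and the `_placeholder` field name are placeholders.

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the dataset whose viewer is failing.
ds = load_dataset("your-org/leaderboard-spaces", split="train")

# Option 1: drop the empty-struct column entirely before writing Parquet.
ds_dropped = ds.remove_columns(["config"])

# Option 2: keep the column but give the struct a dummy child field,
# as the Arrow error message itself suggests.
ds_patched = ds.map(lambda row: {"config": {"_placeholder": False}})

# Either variant can now be written to Parquet without ArrowNotImplementedError.
ds_patched.to_parquet("rows.parquet")
```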
uid: string | id: string | organization: string | name: string | created_at: timestamp[us] | last_modified: string | trending_score: int64 | likes: int64 | tags: sequence | config: dict | results: dict | runtime_stage: string | card_data: dict | sources: sequence | enriched: dict | approval_status: string | editor_short_description: string
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
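For working with these rows programmatically, the reconstructed header above can be sketched as a Python `TypedDict`. The field names and Arrow types come from the preview itself; the nested shapes noted in the comments for `config`, `results`, `card_data`, and `enriched` are loose assumptions based on the visible rows, not an official schema. The preview records themselves follow below.

```python
from typing import Optional, TypedDict


class LeaderboardRow(TypedDict, total=False):
    """One preview row; nested dict columns are left loosely typed."""
    uid: str
    id: str                      # e.g. "gaia-benchmark/leaderboard"
    organization: str
    name: str
    created_at: str              # timestamp[us] in the Arrow schema
    last_modified: str
    trending_score: int
    likes: int
    tags: list                   # sequence of "key:value"-style strings
    config: dict                 # empty struct in every preview row (source of the Parquet error)
    results: Optional[dict]      # e.g. {"results": {"last_modified": ...}}
    runtime_stage: str           # e.g. "RUNNING"
    card_data: dict              # Space card metadata: sdk, emoji, title, ...
    sources: list                # e.g. ["benchmark"] or ["arena"]
    enriched: dict               # category values/counts, freshness fields, quality flags and score
    approval_status: str         # e.g. "approved"
    editor_short_description: Optional[str]
```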
647848ca9c1f42c1f4d7e033 | gaia-benchmark/leaderboard | gaia-benchmark | leaderboard | 2023-06-01T07:29:14 | 2025-02-07 06:20:33 | 23 | 262 | [
"modality:text",
"modality:agent",
"modality:video",
"test:private",
"modality:image",
"gradio",
"submission:automatic",
"region:us",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π¦Ύ",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "GAIA Leaderboard"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 621,
"daysSinceModification": 4,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | A leaderboard for tool-augmented LLMs! |
656449ea771319d93b10fe07 | protectai/prompt-injection-benchmark | protectai | prompt-injection-benchmark | 2023-11-27T07:48:58 | 2024-11-20 17:26:06 | 1 | 13 | [
"eval:safety",
"modality:text",
"test:private",
"gradio",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "yellow",
"colorTo": "gray",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.6.0",
"short_description": null,
"title": "Prompt Injection Detection Benchmark"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 442,
"daysSinceModification": 82,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | No data inside the table. A benchmark that evaluates different prompt injection detection systems by measuring their ability to identify and prevent malicious prompts. |
666f8193d148ca0bcfbca2ed | Intel/UnlearnDiffAtk-Benchmark | Intel | UnlearnDiffAtk-Benchmark | 2024-06-17T00:21:39 | 2025-02-04 15:58:57 | 1 | 7 | [
"eval:safety",
"eval:generation",
"submission:manual",
"modality:image",
"gradio",
"region:us",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "UnlearnDiffAtk Benchmark"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 239,
"daysSinceModification": 6,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A benchmark that evaluates how well diffusion models can unlearn specific concepts while maintaining generation quality and prompt alignment. |
66b4896bcc8441dc730567e5 | panuthept/thai_sentence_embedding_benchmark | panuthept | thai_sentence_embedding_benchmark | 2024-08-08T09:01:31 | 2024-08-08 16:22:44 | 1 | 12 | [
"modality:text",
"language:thai",
"gradio",
"test:public",
"submission:semiautomatic",
"modality:artefacts",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "Thai Sentence Embedding Benchmark"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 187,
"daysSinceModification": 186,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A benchmark that evaluates Thai sentence embedding models across multiple tasks including semantic similarity, classification, and retrieval. |
6730c9858b5a645918504e5b | StarscreamDeceptions/Multilingual-MMLU-Benchmark-Leaderboard | StarscreamDeceptions | Multilingual-MMLU-Benchmark-Leaderboard | 2024-11-10T14:56:05 | 2024-11-25 07:51:11 | 1 | 10 | [
"language:swahili",
"language:italian",
"language:hindi",
"language:portugese",
"language:chinese",
"language:french",
"MMMLU",
"machine learning",
"eval:generation",
"gradio",
"benchmark",
"language:indonesian",
"language:english",
"language:german",
"language:yoruba",
"language:arabic",
"submission:automatic",
"language:bengali",
"leaderboard",
"judge:auto",
"modality:text",
"multilingual",
"language:spanish",
"test:public",
"language:japanese"
] | {} | {
"results": {
"last_modified": "2024-11-13T16:41:46.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "pink",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "π Multilingual MMLU Benchmark Leaderboard"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 92,
"daysSinceModification": 78,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | This leaderboard is dedicated to evaluating and comparing the multilingual capabilities of large language models. |
6687e8366d98cab219a29b72 | ttsds/benchmark | ttsds | benchmark | 2024-07-05T12:33:58 | 2024-08-31 19:50:13 | 1 | 21 | [
"submission:semiautomatic",
"eval:generation",
"tts",
"gradio",
"test:public",
"modality:audio",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2024-11-19T16:49:02.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "mit",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": "Text-To-Speech (TTS) Evaluation using objective metrics.",
"title": "TTSDS Benchmark and Leaderboard"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": null,
"modality": [
"audio"
]
},
"categoryCounts": {
"eval": 1,
"language": null,
"modality": 1
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"semiautomatic"
],
"test": [
"public"
]
},
"daysSinceCreation": 221,
"daysSinceModification": 163,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": true,
"isRunning": true
},
"score": 4
}
} | approved | Compares the quality of speech generation by text-to-speech models using automated metrics. |
65f6886bbe57cd07cb51aa8f | Marqo/CLIP-benchmarks | Marqo | CLIP-benchmarks | 2024-03-17T06:06:35 | 2024-08-07 06:24:27 | 1 | 11 | [
"streamlit"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": false,
"sdk": "streamlit",
"sdk_version": "1.25.0",
"short_description": null,
"title": "CLIP Benchmarks"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 331,
"daysSinceModification": 188,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
6719d6a46937670ca681151e | ArtificialAnalysis/Video-Generation-Arena-Leaderboard | ArtificialAnalysis | Video-Generation-Arena-Leaderboard | 2024-10-24T05:09:56 | 2024-11-24 20:14:48 | 6 | 49 | [
"static",
"modality:video",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "green",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": "Leaderboard and arena of Video Generation models",
"title": "Video Generation Leaderboard"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 110,
"daysSinceModification": 78,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
671bd9d914479a1a119853f0 | argmaxinc/whisperkit-benchmarks | argmaxinc | whisperkit-benchmarks | 2024-10-25T17:48:09 | 2024-12-31 00:07:47 | 0 | 17 | [
"gradio",
"modality:audio"
] | {} | null | RUNNING | {
"app_file": "main.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": null,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "WhisperKit Benchmarks"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 108,
"daysSinceModification": 42,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65bf93c6ba3ff03d686c3667 | utrobinmv/TREX_benchmark_en_ru_zh | utrobinmv | TREX_benchmark_en_ru_zh | 2024-02-04T13:40:22 | 2024-11-04 20:37:05 | 0 | 6 | [
"language:russian",
"gradio",
"language:chinese"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "gray",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.37.2",
"short_description": null,
"title": "TREX Benchmark En Ru Zh"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 373,
"daysSinceModification": 98,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65a5a7c26145ebc6e7e39243 | TTS-AGI/TTS-Arena | TTS-AGI | TTS-Arena | 2024-01-15T21:46:42 | 2025-01-30 16:45:06 | 10 | 630 | [
"arena",
"gradio",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": "zlib",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.1.0",
"short_description": "Vote on the latest TTS models!",
"title": "TTS Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 392,
"daysSinceModification": 11,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
672b47b91b5f7a5e97a0e631 | Marqo/Ecommerce-Embedding-Benchmarks | Marqo | Ecommerce-Embedding-Benchmarks | 2024-11-06T10:40:57 | 2024-11-11 15:57:46 | 0 | 17 | [
"eval:performance",
"modality:text",
"eval:generation",
"modality:image",
"gradio",
"test:public"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "5.5.0",
"short_description": null,
"title": "Ecommerce Embedding Benchmarks"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 97,
"daysSinceModification": 91,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Compares ecommerce embedding models on multimodal product retrieval tasks. |
669d17bbe99ea743cfde99b3 | SUSTech/ChineseSafe-Benchmark | SUSTech | ChineseSafe-Benchmark | 2024-07-21T14:14:19 | 2024-12-28 06:53:49 | 0 | 11 | [
"eval:safety",
"modality:text",
"submission:manual",
"test:private",
"language:chinese",
"gradio",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "purple",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.38.1",
"short_description": null,
"title": "ChineseSafe"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 204,
"daysSinceModification": 45,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A benchmark that evaluates LLMs' ability to moderate Chinese content by measuring their performance in identifying safe and unsafe text across multiple categories. |
657b23848e7790a347c7e4ea | JetBrains-Research/long-code-arena | JetBrains-Research | long-code-arena | 2023-12-14T15:47:16 | 2024-06-19 08:14:50 | 1 | 33 | [
"eval:code",
"gradio",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "ποΈ",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": null,
"title": "Long Code Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 424,
"daysSinceModification": 237,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
651f831f128d26b399db9ea5 | dylanebert/3d-arena | dylanebert | 3d-arena | 2023-10-06T03:46:39 | 2025-02-06 05:38:44 | 5 | 252 | [
"docker",
"modality:3d",
"judge:humans",
"test:public",
"region:us"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "gray",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π’",
"license": "mit",
"pinned": false,
"sdk": "docker",
"sdk_version": null,
"short_description": null,
"title": "3D Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 494,
"daysSinceModification": 5,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | The 3D Arena leaderboard evaluates generative 3D models. |
6691e2b0804994abfc2d81c4 | mteb/arena | mteb | arena | 2024-07-13T02:13:04 | 2025-01-27 02:22:41 | 3 | 91 | [
"arena",
"static",
"modality:artefacts",
"judge:humans",
"region:us",
"leaderboard"
] | {} | {
"results": {
"last_modified": "2025-02-10T13:28:43.000Z"
}
} | RUNNING | {
"app_file": null,
"colorFrom": "indigo",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "βοΈ",
"license": null,
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "MTEB Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 213,
"daysSinceModification": 15,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | Massive Text Embedding Benchmark (MTEB) Leaderboard |
66e7fad2f0053c645b8107df | Inferless/LLM-Inference-Benchmark | Inferless | LLM-Inference-Benchmark | 2024-09-16T09:30:58 | 2024-10-03 08:38:24 | 0 | 8 | [
"gradio",
"eval:performance",
"modality:text",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "LLM Inference Benchmark"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 148,
"daysSinceModification": 131,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Compares the inference speed and performance of LLMs using different libraries. |
6468923b99182de17844bf7b | lmarena-ai/chatbot-arena-leaderboard | lmarena-ai | chatbot-arena-leaderboard | 2023-05-20T09:26:19 | 2025-02-10 06:05:22 | 49 | 3,987 | [
"modality:text",
"modality:image",
"eval: generation",
"gradio",
"judge:humans",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "indigo",
"colorTo": "green",
"duplicated_from": null,
"emoji": "ππ€",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.44.1",
"short_description": null,
"title": "Chatbot Arena Leaderboard"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 633,
"daysSinceModification": 1,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Chatbot Arena is an open-source platform for evaluating AI through human preference. |
663288d87700d0f6454230ac | andrewrreed/closed-vs-open-arena-elo | andrewrreed | closed-vs-open-arena-elo | 2024-05-01T18:24:24 | 2025-01-09 00:30:14 | 1 | 146 | [
"eval:performance",
"modality:text",
"gradio",
"judge:humans",
"test:public"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π¬",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.28.3",
"short_description": null,
"title": "Open LLM Progress Tracker"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 285,
"daysSinceModification": 33,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Visualizes LLM progress through LMSYS Arena ELO ratings over time. |
674eea98c6a6ef2849b4a0ac | bgsys/background-removal-arena | bgsys | background-removal-arena | 2024-12-03T11:25:12 | 2025-01-31 09:20:11 | 3 | 60 | [
"gradio",
"modality:image",
"region:us",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "pink",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "β‘",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "5.7.1",
"short_description": null,
"title": "Background Removal Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 70,
"daysSinceModification": 11,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Background removal leaderboard. |
66129bee0b91dd969671f27f | meval/multilingual-chatbot-arena-leaderboard | meval | multilingual-chatbot-arena-leaderboard | 2024-04-07T13:13:18 | 2024-07-01 14:22:51 | 1 | 17 | [
"language:russian",
"modality:text",
"eval:generation",
"language:english",
"language:portuguese",
"multilingual",
"language:italian",
"language:german",
"language:korean",
"language:chinese",
"language:persian",
"gradio",
"language:polish",
"language:french",
"judge:humans",
"language:spanish",
"leaderboard",
"language:japanese"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "indigo",
"colorTo": "green",
"duplicated_from": null,
"emoji": "ππ€",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.29.0",
"short_description": "Multilingual metrics for the LMSys Arena Leaderboard",
"title": "Multilingual LMSys Chatbot Arena Leaderboard"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": [
"english",
"chinese",
"russian",
"german",
"french",
"spanish",
"japanese",
"portuguese",
"italian",
"persian",
"korean",
"polish"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": 12,
"modality": 1
},
"categoryValues": {
"judge": [
"humans"
],
"submission": null,
"test": null
},
"daysSinceCreation": 310,
"daysSinceModification": 224,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | A crowdsourced platform that evaluates LLMs through human preference votes using the Elo ranking system across multiple languages. |
659421c8c0a266442687437c | yhavinga/dutch-tokenizer-arena | yhavinga | dutch-tokenizer-arena | 2024-01-02T14:46:32 | 2024-11-07 14:38:12 | 1 | 8 | [
"gradio",
"modality:artefacts",
"language:dutch"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "gray",
"duplicated_from": null,
"emoji": "β‘",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.28.3",
"short_description": null,
"title": "Dutch Tokenizer Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 405,
"daysSinceModification": 95,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65c9a8e7dc38a2858a77ff8d | TIGER-Lab/GenAI-Arena | TIGER-Lab | GenAI-Arena | 2024-02-12T05:13:11 | 2025-02-09 17:00:59 | 1 | 267 | [
"arena",
"eval:generation",
"modality:video",
"modality:image",
"gradio",
"judge:humans",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "purple",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.41.0",
"short_description": "Realtime Image/Video Gen AI Arena",
"title": "GenAI Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 365,
"daysSinceModification": 1,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates visual AI models through human preference votes in arena battles. |
672bad068540c5e11ee6b822 | AtlaAI/judge-arena | AtlaAI | judge-arena | 2024-11-06T17:53:10 | 2025-01-31 19:15:47 | 1 | 88 | [
"gradio",
"region:us",
"modality:text",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "indigo",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π»",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "5.5.0",
"short_description": null,
"title": "Judge Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 96,
"daysSinceModification": 10,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
670387ebc04fc0fe267222ad | LLM360/de-arena | LLM360 | de-arena | 2024-10-07T07:04:11 | 2024-11-20 22:49:45 | 1 | 24 | [
"eval:math",
"gradio",
"eval:code",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.1",
"short_description": null,
"title": "Decentralized Arena Leaderboard"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 127,
"daysSinceModification": 82,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65e5154f2b28b798a0d6b975 | SkalskiP/YOLO-ARENA | SkalskiP | YOLO-ARENA | 2024-03-04T00:26:55 | 2024-05-29 06:58:11 | 0 | 30 | [
"gradio",
"modality:image",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "pink",
"colorTo": "green",
"duplicated_from": null,
"emoji": "ποΈ",
"license": "mit",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.19.2",
"short_description": null,
"title": "YOLO ARENA"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 344,
"daysSinceModification": 258,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
6710c75984a75320ee25e5aa | k-mktr/gpu-poor-llm-arena | k-mktr | gpu-poor-llm-arena | 2024-10-17T08:14:17 | 2025-01-29 20:31:14 | 5 | 180 | [
"eval:performance",
"modality:text",
"gradio",
"judge:humans",
"test:public",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.9.1",
"short_description": "Compact LLM Battle Arena: Frugal AI Face-Off!",
"title": "GPU Poor LLM Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 117,
"daysSinceModification": 12,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Evaluates small LLMs through human preference battles. |
64873060328076a994947df6 | lmarena-ai/chatbot-arena | lmarena-ai | chatbot-arena | 2023-06-12T14:49:04 | 2024-09-20 07:41:07 | 0 | 187 | [
"static",
"test:public",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "purple",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π¬",
"license": "other",
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "Chatbot Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 609,
"daysSinceModification": 144,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65a2cd9890b5e87bcdf9f2e2 | yutohub/japanese-chatbot-arena-leaderboard | yutohub | japanese-chatbot-arena-leaderboard | 2024-01-13T17:51:20 | 2024-03-08 11:16:07 | 0 | 34 | [
"language:japanese",
"modality:text",
"eval:generation",
"streamlit"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "streamlit",
"sdk_version": "1.30.0",
"short_description": null,
"title": "Japanese Chatbot Arena Leaderboard"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 394,
"daysSinceModification": 340,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Evaluates Japanese Large Language Models through crowdsourced pairwise comparison in a chat arena format. |
64ef26d33831f23491562014 | xu-song/tokenizer-arena | xu-song | tokenizer-arena | 2023-08-30T11:24:03 | 2025-02-05 04:39:17 | 0 | 59 | [
"eval:performance",
"gradio",
"modality:artefacts",
"judge:humans",
"tokenizer",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "gray",
"duplicated_from": null,
"emoji": "β",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.38.1",
"short_description": "Compare different tokenizers in char-level and byte-level.",
"title": "Tokenizer Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 531,
"daysSinceModification": 6,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
677b478c59e4ac32ab8a4939 | Vchitect/VBench_Video_Arena | Vchitect | VBench_Video_Arena | 2025-01-06T03:01:32 | 2025-01-14 07:51:18 | 1 | 12 | [
"gradio",
"modality:video",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "5.9.1",
"short_description": null,
"title": "VBench Video Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 36,
"daysSinceModification": 28,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65e58abff4a700ec2ddb2533 | Pendrokar/TTS-Spaces-Arena | Pendrokar | TTS-Spaces-Arena | 2024-03-04T08:47:59 | 2025-02-10 10:48:50 | 6 | 287 | [
"arena",
"gradio",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "red",
"duplicated_from": null,
"emoji": "π€π",
"license": "zlib",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.15.0",
"short_description": "Blind vote on HF TTS models!",
"title": "TTS Spaces Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 344,
"daysSinceModification": 1,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
66fd3b0a8600fb45c8ce43b2 | nyanko7/text-to-anime-arena | nyanko7 | text-to-anime-arena | 2024-10-02T12:22:34 | 2024-10-03 05:47:40 | 1 | 5 | [
"modality:image",
"streamlit",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "gray",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "streamlit",
"sdk_version": "1.39.0",
"short_description": null,
"title": "Text To Anime Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 132,
"daysSinceModification": 131,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Text to Image (Anime/Illustration) Generation Arena |
65becbc4744da3e639da88d9 | HaizeLabs/red-teaming-resistance-benchmark | HaizeLabs | red-teaming-resistance-benchmark | 2024-02-03T23:27:00 | 2024-06-07 18:34:09 | 1 | 41 | [
"eval:safety",
"modality:text",
"static",
"submission:automatic",
"test:public",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "pink",
"colorTo": "red",
"duplicated_from": null,
"emoji": "π»",
"license": null,
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "Redteaming Resistance Leaderboard"
} | [
"benchmark"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 373,
"daysSinceModification": 248,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A benchmark that evaluates LLMs' resistance to adversarial prompts and safety violations across multiple categories of harmful content. |
65eac0b7e36ad838b863ebae | atomind/mlip-arena | atomind | mlip-arena | 2024-03-08T07:39:35 | 2025-01-30 07:39:54 | 0 | 11 | [
"domain:physics",
"domain:chemistry",
"region:us",
"streamlit"
] | {} | null | RUNNING | {
"app_file": "serve/app.py",
"colorFrom": null,
"colorTo": null,
"duplicated_from": null,
"emoji": "β",
"license": null,
"pinned": null,
"sdk": "streamlit",
"sdk_version": "1.36.0",
"short_description": null,
"title": "MLIP Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 340,
"daysSinceModification": 12,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A benchmark for ML models predicting molecular physics. |
664885ecb5e5f95dc65dc3d9 | Auto-Arena/Leaderboard | Auto-Arena | Leaderboard | 2024-05-18T10:41:48 | 2024-10-07 02:37:00 | 0 | 21 | [
"gradio"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.27.0",
"short_description": null,
"title": "Auto-Arena Leaderboard"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 269,
"daysSinceModification": 127,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
660bb9ccb75880c7c71ca46c | ZhangYuhan/3DGen-Arena | ZhangYuhan | 3DGen-Arena | 2024-04-02T07:54:52 | 2025-02-11 07:53:28 | 0 | 93 | [
"gradio",
"modality:3d",
"judge:humans",
"test:public",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "indigo",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π ",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.24.0",
"short_description": null,
"title": "3DGen Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 315,
"daysSinceModification": 0,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | 3D Arena leaderboard evaluates generative 3D models. |
643d3016d2c1e08a5eca0c22 | open-llm-leaderboard/open_llm_leaderboard | open-llm-leaderboard | open_llm_leaderboard | 2023-04-17T11:40:06 | 2025-02-10 12:30:39 | 69 | 12,439 | [
"eval:performance",
"modality:text",
"eval:generation",
"eval:code",
"docker",
"eval:math",
"submission:automatic",
"test:public",
"region:us",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2025-02-08T04:12:28.000Z"
}
} | RUNNING | {
"app_file": null,
"colorFrom": "blue",
"colorTo": "red",
"duplicated_from": "open-llm-leaderboard/open_llm_leaderboard",
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "docker",
"sdk_version": null,
"short_description": "Track, rank and evaluate open LLMs and chatbots",
"title": "Open LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 666,
"daysSinceModification": 1,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | Comparing Large Language Models in a reproducible way. |
6670f4cffc615a6257ab35dd | ksort/K-Sort-Arena | ksort | K-Sort-Arena | 2024-06-18T02:45:35 | 2025-02-06 19:51:10 | 0 | 45 | [
"arena",
"gradio",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "purple",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.21.0",
"short_description": "Efficient Image/Video K-Sort Arena",
"title": "K-Sort Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 238,
"daysSinceModification": 4,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
665596eb33fc74415352f86b | yanolja/arena | yanolja | arena | 2024-05-28T08:33:47 | 2024-11-21 06:14:19 | 0 | 7 | [
"gradio",
"modality:text",
"eval:generation",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "βοΈ",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "Yanolja Arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 259,
"daysSinceModification": 82,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Find the best LLMs for summarizing and translating texts. |
633581939ac57cf2967be686 | mteb/leaderboard | mteb | leaderboard | 2022-09-29T11:29:23 | 2025-02-05 09:48:47 | 83 | 4,739 | [
"region:us",
"docker",
"submission:semiautomatic",
"modality:artefacts",
"leaderboard"
] | {} | {
"results": {
"last_modified": "2025-02-10T13:28:43.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "mit",
"pinned": true,
"sdk": "docker",
"sdk_version": null,
"short_description": null,
"title": "MTEB Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 866,
"daysSinceModification": 6,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | An arena ranking LLMs on retrieval capabilities. |
65af98551501453abf5d8e8d | opencompass/open_vlm_leaderboard | opencompass | open_vlm_leaderboard | 2024-01-23T10:43:33 | 2025-01-20 06:49:40 | 14 | 605 | [
"eval:generation",
"modality:image",
"gradio",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.1",
"short_description": "VLMEvalKit Evaluation Results Collection ",
"title": "Open VLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 385,
"daysSinceModification": 22,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
65f0f612555caedb299e54d9 | DontPlanToEnd/UGI-Leaderboard | DontPlanToEnd | UGI-Leaderboard | 2024-03-13T00:40:50 | 2025-02-10 20:18:51 | 18 | 639 | [
"eval:safety",
"modality:text",
"eval:generation",
"submission:manual",
"test:private",
"docker",
"language:English",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "gray",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π’",
"license": "apache-2.0",
"pinned": false,
"sdk": "docker",
"sdk_version": null,
"short_description": null,
"title": "UGI Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation",
"safety"
],
"language": [
"english"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 2,
"language": 1,
"modality": 1
},
"categoryValues": {
"judge": null,
"submission": [
"manual"
],
"test": [
"private"
]
},
"daysSinceCreation": 335,
"daysSinceModification": 0,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | null |
65e124cf138bd34f8ebc927d | gorilla-llm/berkeley-function-calling-leaderboard | gorilla-llm | berkeley-function-calling-leaderboard | 2024-03-01T00:43:59 | 2024-08-23 06:16:27 | 3 | 85 | [
"modality:text",
"modality:agent",
"eval:code",
"judge:auto",
"static",
"leaderboard",
"modality:tools"
] | {} | null | RUNNING | {
"app_file": "index.html",
"colorFrom": "red",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "Berkeley Function Calling Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"code"
],
"language": null,
"modality": [
"tools",
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": null,
"modality": 2
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 347,
"daysSinceModification": 172,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | Evaluates LLMs' ability to call functions. |
64bea7e1f671da974e585dcf | bigcode/bigcode-models-leaderboard | bigcode | bigcode-models-leaderboard | 2023-07-24T16:33:37 | 2024-11-11 20:36:38 | 20 | 1,121 | [
"submission:semiautomatic",
"modality:text",
"eval:code",
"gradio",
"test:public",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "pink",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": null,
"title": "Big Code Models Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"code"
],
"language": null,
"modality": null
},
"categoryCounts": {
"eval": 1,
"language": null,
"modality": null
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"semiautomatic"
],
"test": [
"public"
]
},
"daysSinceCreation": 567,
"daysSinceModification": 91,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | Specialized leaderboard for models with coding capabilities π₯οΈ (Evaluates on HumanEval and MultiPL-E) |
67695a9a4f03e8728cbfb199 | adyen/DABstep | adyen | DABstep | 2024-12-23T12:42:02 | 2025-02-09 19:51:02 | 11 | 15 | [
"modality:text",
"eval:generation",
"gradio",
"submission:automatic",
"test:public",
"region:us",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "πΊ",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": "DABstep Reasoning Benchmark Leaderboard",
"title": "DABstep Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 50,
"daysSinceModification": 1,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | A benchmark that measures LLMs' ability to perform data analysis by evaluating their answers to questions about multiple documents. |
678988614609aa875e136f7b | fr-gouv-coordination-ia/llm_leaderboard_fr | fr-gouv-coordination-ia | llm_leaderboard_fr | 2025-01-16T22:29:53 | 2025-02-07 23:22:26 | 23 | 23 | [
"docker",
"judge:function",
"language:french",
"submission:automatic",
"test:public",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "blue",
"colorTo": "red",
"duplicated_from": "open-llm-leaderboard/open_llm_leaderboard",
"emoji": "ππ«π·",
"license": "apache-2.0",
"pinned": true,
"sdk": "docker",
"sdk_version": null,
"short_description": "Track, rank and evaluate open LLMs and chatbots in French",
"title": "Leaderboard LLM FR"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 25,
"daysSinceModification": 3,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
65650d01a0623adbd7387390 | vectara/leaderboard | vectara | leaderboard | 2023-11-27T21:41:21 | 2025-01-15 17:12:55 | 6 | 97 | [
"gradio",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.0",
"short_description": null,
"title": "HHEM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 441,
"daysSinceModification": 26,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
65944138a260709928710fb6 | allenai/reward-bench | allenai | reward-bench | 2024-01-02T17:00:40 | 2024-12-11 20:55:17 | 7 | 328 | [
"eval:safety",
"eval:performance",
"modality:text",
"gradio",
"test:public",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "pink",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.36.0",
"short_description": null,
"title": "Reward Bench Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 405,
"daysSinceModification": 61,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates reward models across chat, safety and reasoning tasks. |
655b6e9dc11dee7f7e8e5c09 | hallucinations-leaderboard/leaderboard | hallucinations-leaderboard | leaderboard | 2023-11-20T14:35:09 | 2024-06-12 04:22:31 | 2 | 130 | [
"gradio",
"leaderboard"
] | {} | {
"results": {
"last_modified": "2024-10-31T20:32:52.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": null,
"title": "Hallucinations Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 448,
"daysSinceModification": 244,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | null |
6662b2c6cc6519da32cd6f4d | bigcode/bigcodebench-leaderboard | bigcode | bigcodebench-leaderboard | 2024-06-07T07:12:06 | 2025-02-04 20:18:34 | 6 | 181 | [
"eval:code",
"gradio",
"test:public",
"region:us",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.44.0",
"short_description": null,
"title": "BigCodeBench Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"code"
],
"language": null,
"modality": null
},
"categoryCounts": {
"eval": 1,
"language": null,
"modality": null
},
"categoryValues": {
"judge": [
"auto"
],
"submission": null,
"test": [
"public"
]
},
"daysSinceCreation": 249,
"daysSinceModification": 6,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | null |
672b762fd40b55aa6f62e8f2 | elmresearchcenter/open_universal_arabic_asr_leaderboard | elmresearchcenter | open_universal_arabic_asr_leaderboard | 2024-11-06T13:59:11 | 2025-02-04 07:54:52 | 3 | 20 | [
"gradio",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": "A benchmark for open-source multi-dialect Arabic ASR models",
"title": "Open Universal Arabic Asr Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 97,
"daysSinceModification": 7,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
64943e5108f840ed960f312a | optimum/llm-perf-leaderboard | optimum | llm-perf-leaderboard | 2023-06-22T12:28:01 | 2025-02-03 11:16:07 | 4 | 419 | [
"llm performance leaderboard",
"llm",
"llm perf leaderboard",
"gradio",
"performance",
"region:us",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "πποΈ",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.14.0",
"short_description": null,
"title": "LLM-Perf Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 600,
"daysSinceModification": 8,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
66fd89b752e2309fb9c19e6c | navidved/open_persian_asr_leaderboard | navidved | open_persian_asr_leaderboard | 2024-10-02T17:58:15 | 2024-11-06 15:55:44 | 2 | 8 | [
"gradio",
"modality:audio",
"leaderboard",
"language:persian"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.41.0",
"short_description": null,
"title": "Open Persian ASR Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 131,
"daysSinceModification": 96,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
64f9e6dd59eae6df399ba1e9 | hf-audio/open_asr_leaderboard | hf-audio | open_asr_leaderboard | 2023-09-07T15:06:05 | 2024-11-22 23:31:43 | 4 | 611 | [
"submission:semiautomatic",
"eval:performance",
"gradio",
"test:public",
"modality:audio",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.6.0",
"short_description": null,
"title": "Open ASR Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 522,
"daysSinceModification": 80,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates English ASR model performance and speed on public benchmarks |
65f5da0cde5e636ca24f3083 | hebrew-llm-leaderboard/leaderboard | hebrew-llm-leaderboard | leaderboard | 2024-03-16T17:42:36 | 2025-01-20 16:38:30 | 2 | 31 | [
"language:Hebrew",
"modality:text",
"eval:generation",
"gradio",
"submission:automatic",
"test:mix",
"region:us",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.0",
"short_description": null,
"title": "Hebrew LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": [
"hebrew"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": 1,
"modality": 1
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"automatic"
],
"test": [
"mix"
]
},
"daysSinceCreation": 331,
"daysSinceModification": 21,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | null |
66918a9ed4d26f854abab9c5 | ParsBench/leaderboard | ParsBench | leaderboard | 2024-07-12T19:57:18 | 2024-11-06 20:27:24 | 3 | 37 | [
"modality:text",
"eval:generation",
"gradio",
"language:persian",
"submission:automatic",
"leaderboard"
] | {} | {
"results": {
"last_modified": "2024-08-17T18:52:12.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.5.0",
"short_description": null,
"title": "Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": [
"persian"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": 1,
"modality": 1
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 213,
"daysSinceModification": 96,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": true,
"isRunning": true
},
"score": 4
}
} | approved | Compares Persian language models on diverse NLP tasks like reasoning, generation and understanding. |
659c951ed9a59ad53d6a9a37 | mlabonne/Yet_Another_LLM_Leaderboard | mlabonne | Yet_Another_LLM_Leaderboard | 2024-01-09T00:36:46 | 2024-06-16 22:12:56 | 1 | 185 | [
"modality:text",
"submission:manual",
"docker",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "red",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "docker",
"sdk_version": null,
"short_description": null,
"title": "Yet Another LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 399,
"daysSinceModification": 239,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
662a871654a69f2d529d3987 | OALL/Open-Arabic-LLM-Leaderboard | OALL | Open-Arabic-LLM-Leaderboard | 2024-04-25T16:38:46 | 2025-02-11 10:00:02 | 3 | 124 | [
"modality:text",
"language:arabic",
"gradio",
"submission:automatic",
"test:public",
"region:us",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2025-02-09T21:31:38.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.36.0",
"short_description": "Track, rank and evaluate open Arabic LLMs and chatbots",
"title": "Open Arabic LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 291,
"daysSinceModification": 0,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | LLM leaderboard comparing Arabic language models' performance across various benchmarks including reasoning, language understanding and cultural alignment, using zero-shot evaluation. |
64fad4e58d50404bc4ee667f | opencompass/opencompass-llm-leaderboard | opencompass | opencompass-llm-leaderboard | 2023-09-08T08:01:41 | 2024-02-08 03:03:58 | 1 | 90 | [
"gradio",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "3.43.1",
"short_description": null,
"title": "OpenCompass LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 522,
"daysSinceModification": 369,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
659d762f50c1bbee5be20c63 | AI-Secure/llm-trustworthy-leaderboard | AI-Secure | llm-trustworthy-leaderboard | 2024-01-09T16:37:03 | 2024-11-22 05:50:44 | 1 | 88 | [
"eval:safety",
"modality:text",
"gradio",
"submission:automatic",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.37.1",
"short_description": null,
"title": "LLM Safety Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 398,
"daysSinceModification": 81,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Bias, safety, toxicity: all the things that are important to test when your chatbot actually interacts with users |
6507adb704d04d653d0155e3 | opencompass/MMBench | opencompass | MMBench | 2023-09-18T01:53:59 | 2025-01-06 03:18:10 | 1 | 20 | [
"modality:text",
"language:english",
"modality:image",
"judge:function",
"language:chinese",
"gradio",
"test:public",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": null,
"title": "MMBench Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 512,
"daysSinceModification": 36,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
65e5e7b2a87482d11980782d | Intel/powered_by_intel_llm_leaderboard | Intel | powered_by_intel_llm_leaderboard | 2024-03-04T15:24:34 | 2025-01-23 12:25:01 | 1 | 39 | [
"submission:semiautomatic",
"modality:text",
"eval:generation",
"gradio",
"test:public",
"region:us",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π»",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "Powered By Intel Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": null,
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": null,
"modality": 1
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"semiautomatic"
],
"test": [
"public"
]
},
"daysSinceCreation": 343,
"daysSinceModification": 19,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | null |
65e8f3af5686ed1f5ec30cdc | allenai/WildBench | allenai | WildBench | 2024-03-06T22:52:31 | 2024-08-06 05:40:31 | 1 | 222 | [
"submission:semiautomatic",
"judge:model",
"modality:text",
"eval:generation",
"eval:code",
"gradio",
"eval:math",
"test:public",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π¦",
"license": null,
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.19.2",
"short_description": null,
"title": "AI2 WildBench Leaderboard (V2)"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 341,
"daysSinceModification": 189,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates LLMs on real-world tasks across multiple capabilities. |
6613a26850350afe76d25129 | la-leaderboard/la-leaderboard | la-leaderboard | la-leaderboard | 2024-04-08T07:53:12 | 2024-12-16 10:53:19 | 1 | 67 | [
"modality:text",
"eval:generation",
"language:spanish",
"language:basque",
"eval: generation",
"gradio",
"language:catalan",
"language:galician",
"submission:automatic",
"test:public",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2024-10-18T15:39:43.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "πΈ",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.1",
"short_description": "Evaluate open LLMs in the languages of LATAM and Spain.",
"title": "La Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": [
"spanish",
"catalan",
"basque",
"galician"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": 4,
"modality": 1
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"automatic"
],
"test": [
"public"
]
},
"daysSinceCreation": 309,
"daysSinceModification": 57,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": true,
"isRunning": true
},
"score": 4
}
} | approved | Evaluates LLM capabilities in Spanish varieties and official languages of Spain through comprehensive automated linguistic benchmarking across multiple regional languages. |
6639befd49238ebdde0dc911 | Intel/low_bit_open_llm_leaderboard | Intel | low_bit_open_llm_leaderboard | 2024-05-07T05:41:17 | 2024-12-23 06:20:55 | 1 | 163 | [
"eval:performance",
"modality:text",
"gradio",
"submission:automatic",
"test:public",
"modality:artefacts",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.31.5",
"short_description": "Track, rank and evaluate open LLMs and chatbots",
"title": "Low-bit Quantized Open LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 280,
"daysSinceModification": 50,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | A benchmark that evaluates low-bit quantized LLMs across multiple tasks using standardized test sets, focusing on both model performance and quantization efficiency. |
66aaf73f50bd6711f39cafb3 | allenai/ZeroEval | allenai | ZeroEval | 2024-08-01T02:47:27 | 2024-11-22 20:40:52 | 1 | 49 | [
"eval:code",
"judge:function",
"static",
"eval:math",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "indigo",
"colorTo": "gray",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "ZeroEval Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 194,
"daysSinceModification": 80,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
6690283166f3099d1265f6b7 | allenai/ZebraLogic | allenai | ZebraLogic | 2024-07-11T18:45:05 | 2024-11-05 22:49:28 | 1 | 84 | [
"modality:text",
"eval:math",
"gradio",
"submission:automatic",
"test:public",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π¦",
"license": null,
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.19.2",
"short_description": null,
"title": "Zebra Logic Bench"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 214,
"daysSinceModification": 97,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates LLM logical reasoning on puzzle-solving tasks. |
66742a00ccd71b5bb784b85f | m42-health/clinical_ner_leaderboard | m42-health | clinical_ner_leaderboard | 2024-06-20T13:09:20 | 2024-10-14 10:06:19 | 1 | 19 | [
"eval:performance",
"modality:text",
"language:english",
"gradio",
"submission:automatic",
"test:public",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "Clinical NER Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": [
"text"
]
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": 1
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"automatic"
],
"test": [
"public"
]
},
"daysSinceCreation": 236,
"daysSinceModification": 120,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | Evaluates clinical NER capabilities of LLMs across diverse medical datasets using token- and span-based evaluation metrics. Named Clinical Entity Recognition Leaderboard. |
66bd6a9a359d1ee9690153b9 | llm-jp/open-japanese-llm-leaderboard | llm-jp | open-japanese-llm-leaderboard | 2024-08-15T02:40:26 | 2024-12-24 09:03:04 | 1 | 66 | [
"language:ζ₯ζ¬θͺ",
"modality:text",
"eval:generation",
"language:Japanese",
"gradio",
"ζ₯ζ¬θͺ",
"submission:automatic",
"test:public",
"Japanese",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "gray",
"colorTo": "gray",
"duplicated_from": null,
"emoji": "πΈ",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.9.1",
"short_description": null,
"title": "Open Japanese LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": [
"ζ₯ζ¬θͺ",
"japanese"
],
"modality": null
},
"categoryCounts": {
"eval": null,
"language": 2,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 180,
"daysSinceModification": 49,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | The Open Japanese LLM Leaderboard by LLM-jp evaluates the performance of Japanese Large Language Models (LLMs) on more than 16 tasks, ranging from classical to modern NLP. |
66fead0f3a221be1070a1ed5 | open-llm-leaderboard/comparator | open-llm-leaderboard | comparator | 2024-10-03T14:41:19 | 2025-01-09 15:13:23 | 1 | 84 | [
"gradio",
"region:us",
"leaderboard"
] | {} | {
"results": {
"last_modified": "2025-02-08T04:12:28.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "gray",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.44.1",
"short_description": "Compare Open LLM Leaderboard results",
"title": "Open LLM Leaderboard Model Comparator"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 130,
"daysSinceModification": 32,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | null |
66f2396eb26f416f21fb41eb | opencompass/openvlm_video_leaderboard | opencompass | openvlm_video_leaderboard | 2024-09-24T04:00:46 | 2024-11-21 13:35:06 | 1 | 99 | [
"judge:model",
"modality:video",
"judge:function",
"gradio",
"test:public",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.0",
"short_description": "VLMEvalKit Eval Results in video understanding benchmark",
"title": "Open VLM Video Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 140,
"daysSinceModification": 82,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
66b39fac7fa3c7cdc13f482f | Vikhrmodels/small-shlepa-lb | Vikhrmodels | small-shlepa-lb | 2024-08-07T16:24:12 | 2024-08-08 14:31:15 | 1 | 13 | [
"language:russian",
"language:english",
"judge:function",
"gradio",
"test:public",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": "Evaluate LLMs using russian MC tasks",
"title": "Small Shlepa Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 187,
"daysSinceModification": 186,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
65b3ccae16301f403033baac | logikon/open_cot_leaderboard | logikon | open_cot_leaderboard | 2024-01-26T15:15:58 | 2024-11-02 11:06:00 | 0 | 50 | [
"chain-of-thought",
"modality:text",
"eval:generation",
"gradio",
"submission:automatic",
"test:public",
"CoT",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "yellow",
"duplicated_from": "logikon/open_cot_leaderboard",
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.36.0",
"short_description": "Track, rank and evaluate open LLMs' CoT quality",
"title": "Open CoT Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 381,
"daysSinceModification": 101,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates Large Language Models' chain-of-thought reasoning performance across multiple logical reasoning tasks. |
648b40be34fee97b500a7975 | ml-energy/leaderboard | ml-energy | leaderboard | 2023-06-15T16:47:58 | 2024-10-04 17:57:22 | 0 | 8 | [
"eval:performance",
"modality:text",
"modality:video",
"modality:image",
"gradio",
"submission:automatic",
"test:public",
"energy",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": null,
"colorTo": null,
"duplicated_from": null,
"emoji": "β‘",
"license": null,
"pinned": true,
"sdk": "gradio",
"sdk_version": "3.39.0",
"short_description": null,
"title": "ML.ENERGY Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 606,
"daysSinceModification": 129,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates GenAI models' energy consumption and inference performance |
65a2d7dcb4f188a4db12dc94 | NPHardEval/NPHardEval-leaderboard | NPHardEval | NPHardEval-leaderboard | 2024-01-13T18:35:08 | 2024-02-05 22:44:01 | 0 | 52 | [
"modality:text",
"eval:code",
"eval:math",
"gradio",
"submission:automatic",
"test:public",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.4.0",
"short_description": null,
"title": "NPHardEval Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 394,
"daysSinceModification": 371,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates LLM reasoning on computational complexity problems. |
65fdbb08b9d70ef8298cd350 | antoinelouis/decouvrir | antoinelouis | decouvrir | 2024-03-22T17:08:24 | 2024-09-03 13:11:14 | 0 | 10 | [
"eval:rag",
"modality:text",
"submission:manual",
"gradio",
"modality:artifact",
"language:french",
"language:French",
"leaderboard",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.21.0",
"short_description": "Leaderboard of information retrieval models in French",
"title": "DΓ©couvrIR"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": [
"french"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": null,
"language": 1,
"modality": 1
},
"categoryValues": {
"judge": null,
"submission": [
"manual"
],
"test": null
},
"daysSinceCreation": 325,
"daysSinceModification": 161,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | Evaluates French IR models' performance on passage retrieval tasks |
66a102fb7ba13bb9bb641f7c | ai-forever/LIBRA-Leaderboard | ai-forever | LIBRA-Leaderboard | 2024-07-24T13:34:51 | 2024-11-18 08:26:17 | 0 | 7 | [
"language:russian",
"gradio",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "indigo",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": "LLM long context benchmark",
"title": "LIBRA Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 202,
"daysSinceModification": 85,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
662e0e6445eb426d06590b55 | speakleash/mt-bench-pl | speakleash | mt-bench-pl | 2024-04-28T08:52:52 | 2024-10-25 19:54:43 | 0 | 20 | [
"judge:model",
"modality:text",
"eval:generation",
"eval:code",
"submission:manual",
"gradio",
"language:polish",
"eval:math",
"test:public",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "ππ΅π±",
"license": "other",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.31.4",
"short_description": null,
"title": "MT Bench PL"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": [
"polish"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": 1,
"modality": 1
},
"categoryValues": {
"judge": [
"model"
],
"submission": [
"manual"
],
"test": null
},
"daysSinceCreation": 289,
"daysSinceModification": 108,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": true,
"isRunning": true
},
"score": 3
}
} | approved | Evaluates Polish LLM capabilities across diverse linguistic and cognitive tasks using a specialized, culturally-adapted benchmarking methodology. |
65f42c08e364a7d45b73f76c | sparse-generative-ai/open-moe-llm-leaderboard | sparse-generative-ai | open-moe-llm-leaderboard | 2024-03-15T11:07:52 | 2024-08-13 09:30:40 | 0 | 32 | [
"eval:performance",
"modality:text",
"eval:generation",
"eval:math",
"gradio",
"submission:automatic",
"test:public",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2024-08-26T08:47:37.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.26.0",
"short_description": null,
"title": "OPEN-MOE-LLM-LEADERBOARD"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 333,
"daysSinceModification": 182,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | A leaderboard evaluating performance and efficiency metrics of open-source Mixture of Experts (MoE) LLMs across multiple benchmarks. |
6646ab59c02ec140e973a6b7 | openreviewer/reviewer-arena | openreviewer | reviewer-arena | 2024-05-17T00:56:57 | 2024-05-22 22:03:59 | 0 | 9 | [
"gradio",
"modality:text",
"judge:humans"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": null,
"colorTo": null,
"duplicated_from": null,
"emoji": null,
"license": null,
"pinned": null,
"sdk": "gradio",
"sdk_version": "4.31.0",
"short_description": null,
"title": "reviewer-arena"
} | [
"arena"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 270,
"daysSinceModification": 264,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A crowd-sourced LLM benchmark for academic paper reviews. |
6697c434e8ec15fafe46b5b7 | MIMIC-CDM/leaderboard | MIMIC-CDM | leaderboard | 2024-07-17T13:16:36 | 2025-02-10 17:00:12 | 0 | 5 | [
"modality:text",
"eval:generation",
"clinical decision making",
"submission:manual",
"language:English",
"gradio",
"test:public",
"medicine",
"region:us",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2025-02-10T15:52:55.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": null,
"title": "MIMIC CDM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": [
"generation"
],
"language": [
"english"
],
"modality": [
"text"
]
},
"categoryCounts": {
"eval": 1,
"language": 1,
"modality": 1
},
"categoryValues": {
"judge": [
"auto"
],
"submission": [
"manual"
],
"test": [
"public"
]
},
"daysSinceCreation": 209,
"daysSinceModification": 0,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": true,
"isRunning": true
},
"score": 4
}
} | approved | null |
665e7241f8cb81b0a476eccb | ArtificialAnalysis/Text-to-Image-Leaderboard | ArtificialAnalysis | Text-to-Image-Leaderboard | 2024-06-04T01:47:45 | 2024-06-16 20:06:00 | 9 | 341 | [
"static"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "green",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": null,
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "Text To Image Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 252,
"daysSinceModification": 239,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
662e9e1efa3959cbe30a35a6 | ArtificialAnalysis/LLM-Performance-Leaderboard | ArtificialAnalysis | LLM-Performance-Leaderboard | 2024-04-28T19:06:06 | 2024-06-11 20:46:38 | 3 | 276 | [
"eval:performance",
"modality:text",
"submission:manual",
"test:private",
"static",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "purple",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π¨",
"license": null,
"pinned": false,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "LLM Performance Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 288,
"daysSinceModification": 244,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | A benchmark that evaluates LLM API providers by measuring their performance metrics including latency, speed, and quality across different workload scenarios. |
66039ba97650c6c4369aceb8 | instructkr/LogicKor-leaderboard | instructkr | LogicKor-leaderboard | 2024-03-27T04:08:09 | 2024-03-27 04:13:08 | 2 | 36 | [
"modality:text",
"eval:generation",
"language:korean",
"static",
"test:public"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "yellow",
"colorTo": "red",
"duplicated_from": null,
"emoji": "π₯π",
"license": "apache-2.0",
"pinned": true,
"sdk": "static",
"sdk_version": null,
"short_description": null,
"title": "LogicKor Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 321,
"daysSinceModification": 321,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Evaluates Korean Large Language Models' performance across multiple reasoning and language tasks. This project is no longer maintained. |
670ed70fd75f1143525d9a33 | latticeflow/compl-ai-board | latticeflow | compl-ai-board | 2024-10-15T20:56:47 | 2024-12-02 14:06:52 | 0 | 24 | [
"domain:legal",
"eval:safety",
"modality:text",
"gradio",
"submission:automatic",
"test:public",
"leaderboard",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2024-10-16T13:12:52.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.4.0",
"short_description": null,
"title": "EU AI Act Compliance Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 118,
"daysSinceModification": 70,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 3
}
} | approved | Evaluates LLM compliance with EU AI Act technical requirements & safety standards. |
6759454b7ce296e512352de2 | nyunai/edge-llm-leaderboard | nyunai | edge-llm-leaderboard | 2024-12-11T07:54:51 | 2024-12-16 12:50:13 | 0 | 18 | [
"llm edge leaderboard",
"edge",
"edge llm leaderboard",
"llm",
"gradio",
"leaderboard"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "blue",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "5.8.0",
"short_description": null,
"title": "Edge LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 62,
"daysSinceModification": 57,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
67587a5be374d9f14aa57a02 | kz-transformers/kaz-llm-lb | kz-transformers | kaz-llm-lb | 2024-12-10T17:28:59 | 2024-12-24 16:50:17 | 0 | 5 | [
"gradio",
"region:us",
"leaderboard",
"language:kazakh"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": "Evaluate LLMs using Kazakh MC tasks",
"title": "Kaz LLM Leaderboard"
} | [
"tag:leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 62,
"daysSinceModification": 48,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": true,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | null |
6738561119cbbe30918d6435 | PartAI/open-persian-llm-leaderboard | PartAI | open-persian-llm-leaderboard | 2024-11-16T08:21:37 | 2024-12-07 00:38:28 | 6 | 53 | [
"gradio",
"region:us",
"modality:text",
"language:persian"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "red",
"colorTo": "red",
"duplicated_from": null,
"emoji": "π
",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.42.0",
"short_description": "Open Persian LLM Leaderboard",
"title": "Open Persian LLM Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 87,
"daysSinceModification": 66,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65b0a64db233ea8ce65f0bc5 | echo840/ocrbench-leaderboard | echo840 | ocrbench-leaderboard | 2024-01-24T05:55:25 | 2025-01-16 14:01:43 | 4 | 120 | [
"gradio"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "gray",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.15.0",
"short_description": null,
"title": "Ocrbench Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 384,
"daysSinceModification": 26,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
667b29b383f9e85330f260fa | vidore/vidore-leaderboard | vidore | vidore-leaderboard | 2024-06-25T20:33:55 | 2024-12-05 10:28:29 | 2 | 108 | [
"gradio"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "mit",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.37.1",
"short_description": null,
"title": "Vidore Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 230,
"daysSinceModification": 68,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
66b2e7ef523bf90aa7062503 | ThaiLLM-Leaderboard/leaderboard | ThaiLLM-Leaderboard | leaderboard | 2024-08-07T03:20:15 | 2024-11-16 12:07:12 | 2 | 43 | [
"judge:model",
"modality:text",
"eval:generation",
"submission:manual",
"language:thai",
"gradio",
"test:public",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2025-02-01T18:31:26.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.26.0",
"short_description": null,
"title": "Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 188,
"daysSinceModification": 87,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates Thai LLM capabilities across multiple linguistic benchmarks using diverse evaluation methods. |
6760f5d6470988736686f069 | opencompass/Open_LMM_Reasoning_Leaderboard | opencompass | Open_LMM_Reasoning_Leaderboard | 2024-12-17T03:53:58 | 2025-02-10 13:36:44 | 2 | 28 | [
"modality:image",
"judge:function",
"gradio",
"eval:math",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": null,
"short_description": "A Leaderboard that demonstrates LMM reasoning capabilities",
"title": "Open LMM Reasoning Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 56,
"daysSinceModification": 1,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
65adcd10d6b10af9119fc960 | Vchitect/VBench_Leaderboard | Vchitect | VBench_Leaderboard | 2024-01-22T02:04:00 | 2025-01-23 06:16:37 | 7 | 169 | [
"gradio",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "indigo",
"colorTo": "pink",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "gradio",
"sdk_version": "4.36.1",
"short_description": null,
"title": "VBench Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 386,
"daysSinceModification": 19,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
66231fbfd323727f81a5bbec | SeaLLMs/LLM_Leaderboard_for_SEA | SeaLLMs | LLM_Leaderboard_for_SEA | 2024-04-20T01:51:59 | 2024-12-10 12:29:34 | 2 | 18 | [
"gradio"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "yellow",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.27.0",
"short_description": null,
"title": "LLM Leaderboard for SEA"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 297,
"daysSinceModification": 63,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
673517eae90a2e0b0d784972 | ServiceNow/browsergym-leaderboard | ServiceNow | browsergym-leaderboard | 2024-11-13T21:19:38 | 2025-02-07 21:08:13 | 2 | 12 | [
"modality:agent",
"docker",
"region:us"
] | {} | null | RUNNING | {
"app_file": null,
"colorFrom": "purple",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π",
"license": "mit",
"pinned": false,
"sdk": "docker",
"sdk_version": null,
"short_description": null,
"title": "BrowserGym Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 89,
"daysSinceModification": 3,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | null |
67909d72a1832c8a7cdd4599 | galileo-ai/agent-leaderboard | galileo-ai | agent-leaderboard | 2025-01-22T07:25:38 | 2025-02-10 14:59:46 | 2 | 33 | [
"modality:agent",
"eval:generation",
"judge:function",
"gradio",
"region:us"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "yellow",
"colorTo": "purple",
"duplicated_from": null,
"emoji": "π¬",
"license": "apache-2.0",
"pinned": false,
"sdk": "gradio",
"sdk_version": "5.0.1",
"short_description": "Ranking of LLMs for agentic tasks",
"title": "Agent Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 20,
"daysSinceModification": 0,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Evaluating LLM capabilities in tool usage and functions |
677f99fe8d5985fec9dcaea3 | omlab/open-agent-leaderboard | omlab | open-agent-leaderboard | 2025-01-09T09:42:22 | 2025-02-11 05:53:58 | 2 | 13 | [
"eval:performance",
"modality:text",
"modality:agent",
"gradio",
"eval:math",
"submission:semiautomatic",
"region:us",
"judge:auto"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "blue",
"colorTo": "green",
"duplicated_from": null,
"emoji": "π₯",
"license": "mit",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.44.1",
"short_description": "Open Agent Leaderboard",
"title": "Open Agent Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 33,
"daysSinceModification": 0,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": true,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Compares the math reasoning capabilities and performance of conversational agents. |
647c02aeb31514a4a6ed3fe1 | uonlp/open_multilingual_llm_leaderboard | uonlp | open_multilingual_llm_leaderboard | 2023-06-04T03:19:10 | 2024-11-23 18:57:01 | 1 | 52 | [
"language:vietnamese",
"language:portuguese",
"language:telugu",
"language:italian",
"language:danish",
"language:hindi",
"language:chinese",
"language:catalan",
"language:french",
"language:ukrainian",
"eval:generation",
"language:dutch",
"submission:manual",
"language:romanian",
"language:swedish",
"gradio",
"language:indonesian",
"language:nepali",
"language:serbian",
"language:malayalam",
"language:armenian",
"language:croatian",
"language:german",
"language:arabic",
"language:bengali",
"language:kannada",
"language:russian",
"modality:text",
"language:marathi",
"language:basque",
"language:gujarati",
"language:hungarian",
"language:slovak",
"language:tamil",
"language:spanish",
"test:public"
] | {} | null | RUNNING | {
"app_file": "app.py",
"colorFrom": "purple",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π¨",
"license": null,
"pinned": false,
"sdk": "gradio",
"sdk_version": "5.5.0",
"short_description": null,
"title": "Open Multilingual Llm Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 618,
"daysSinceModification": 79,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": false,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": false,
"hasTags": false,
"isRunning": true
},
"score": 1
}
} | approved | Evaluates Large Language Models' performance across 31 diverse languages using standardized benchmarks. |
65d70863ef58a69470ead2fc | openlifescienceai/open_medical_llm_leaderboard | openlifescienceai | open_medical_llm_leaderboard | 2024-02-22T08:40:03 | 2025-01-29 06:03:29 | 10 | 337 | [
"modality:text",
"eval:generation",
"gradio",
"domain:medical",
"submission:automatic",
"test:public",
"region:us",
"judge:auto"
] | {} | {
"results": {
"last_modified": "2025-01-29T05:54:02.000Z"
}
} | RUNNING | {
"app_file": "app.py",
"colorFrom": "green",
"colorTo": "indigo",
"duplicated_from": null,
"emoji": "π₯",
"license": "apache-2.0",
"pinned": true,
"sdk": "gradio",
"sdk_version": "4.4.0",
"short_description": null,
"title": "Open Medical-LLM Leaderboard"
} | [
"leaderboard"
] | {
"categoryAllValues": {
"eval": null,
"language": null,
"modality": null
},
"categoryCounts": {
"eval": null,
"language": null,
"modality": null
},
"categoryValues": {
"judge": null,
"submission": null,
"test": null
},
"daysSinceCreation": 355,
"daysSinceModification": 13,
"daysSinceResultsUpdate": null,
"hasRecentResults": false,
"hasResults": true,
"isNew": false,
"isRecentlyUpdated": false,
"quality": {
"flags": {
"hasLeaderboardOrArenaTag": false,
"hasRecentResults": false,
"hasResults": true,
"hasTags": false,
"isRunning": true
},
"score": 2
}
} | approved | Evaluates LLMs across a diverse array of medical datasets. |
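
Across the previewed rows, the nested "quality" object pairs five boolean flags with an integer "score", and in every record shown the score equals the number of flags set to true (isRunning alone gives 1; isRunning plus hasResults gives 2). The sketch below is a hypothetical reconstruction of that rule, inferred only from this preview — the dataset does not document how the score is actually computed, and the dataclass field names are just Python-style renamings of the JSON keys shown above.

# A minimal sketch (not part of the dataset): infer the quality score as the
# count of true flags, the simplest rule consistent with the preview rows.

from dataclasses import dataclass


@dataclass
class QualityFlags:
    """Boolean quality signals attached to each leaderboard space in the preview."""
    has_leaderboard_or_arena_tag: bool
    has_recent_results: bool
    has_results: bool
    has_tags: bool
    is_running: bool


def inferred_quality_score(flags: QualityFlags) -> int:
    """Count the true flags -- an assumption drawn from the preview, not a documented formula."""
    return sum(
        [
            flags.has_leaderboard_or_arena_tag,
            flags.has_recent_results,
            flags.has_results,
            flags.has_tags,
            flags.is_running,
        ]
    )


if __name__ == "__main__":
    # Mirrors the ThaiLLM-Leaderboard row above: hasResults and isRunning are true,
    # the other flags are false, and the stored score is 2.
    thai_llm = QualityFlags(
        has_leaderboard_or_arena_tag=False,
        has_recent_results=False,
        has_results=True,
        has_tags=False,
        is_running=True,
    )
    print(inferred_quality_score(thai_llm))  # -> 2, matching the row

If the real scoring logic weights some flags differently, rows outside this preview would reveal it; treat this purely as a reading aid for the "quality" column.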