merve HF staff committed on
Commit
b4c35fc
1 Parent(s): ed98a81

Fix task tags

Browse files
Files changed (1) hide show
  1. README.md +199 -199
README.md CHANGED
@@ -1,209 +1,209 @@
1
  ---
2
  license: apache-2.0
3
  datasets:
4
- - lmms-lab/LLaVA-OneVision-Data
5
  language:
6
- - en
7
- - zh
8
  metrics:
9
- - accuracy
10
  library_name: transformers
11
  tags:
12
- - multimodal
13
-
14
  model-index:
15
- - name: llava-onevision-qwen-72b-si
16
- results:
17
- - task:
18
- type: multimodal
19
- dataset:
20
- type: ai2d
21
- name: AI2D
22
- metrics:
23
- - name: accuracy
24
- type: accuracy
25
- value: 85.1
26
- verified: true
27
- - task:
28
- type: multimodal
29
- dataset:
30
- type: chartqa
31
- name: ChartQA
32
- metrics:
33
- - name: accuracy
34
- type: accuracy
35
- value: 84.9
36
- verified: true
37
- - task:
38
- type: multimodal
39
- dataset:
40
- type: docvqa
41
- name: DocVQA
42
- metrics:
43
- - name: accuracy
44
- type: accuracy
45
- value: 93.5
46
- verified: true
47
- - task:
48
- type: multimodal
49
- dataset:
50
- type: infovqa
51
- name: InfoVQA
52
- metrics:
53
- - name: accuracy
54
- type: accuracy
55
- value: 77.7
56
- verified: true
57
- - task:
58
- type: multimodal
59
- dataset:
60
- type: mathverse
61
- name: MathVerse
62
- metrics:
63
- - name: accuracy
64
- type: accuracy
65
- value: 37.7
66
- verified: true
67
- - task:
68
- type: multimodal
69
- dataset:
70
- type: mathvista
71
- name: MathVista
72
- metrics:
73
- - name: accuracy
74
- type: accuracy
75
- value: 66.5
76
- verified: true
77
- - task:
78
- type: multimodal
79
- dataset:
80
- type: mmbench
81
- name: MMBench
82
- metrics:
83
- - name: accuracy
84
- type: accuracy
85
- value: 86.6
86
- verified: true
87
- - task:
88
- type: multimodal
89
- dataset:
90
- type: mme
91
- name: MME
92
- metrics:
93
- - name: score
94
- type: score
95
- value: 2269
96
- verified: true
97
- - task:
98
- type: multimodal
99
- dataset:
100
- type: mmmu
101
- name: MMMU
102
- metrics:
103
- - name: accuracy
104
- type: accuracy
105
- value: 57.4
106
- verified: true
107
- - task:
108
- type: multimodal
109
- dataset:
110
- type: mmvet
111
- name: MMVet
112
- metrics:
113
- - name: accuracy
114
- type: accuracy
115
- value: 60.0
116
- verified: true
117
- - task:
118
- type: multimodal
119
- dataset:
120
- type: mmstar
121
- name: MMStar
122
- metrics:
123
- - name: accuracy
124
- type: accuracy
125
- value: 65.2
126
- verified: true
127
- - task:
128
- type: multimodal
129
- dataset:
130
- type: seed-bench
131
- name: Seed-Bench
132
- metrics:
133
- - name: accuracy
134
- type: accuracy
135
- value: 77.6
136
- verified: true
137
- - task:
138
- type: multimodal
139
- dataset:
140
- type: science-qa
141
- name: Science-QA
142
- metrics:
143
- - name: accuracy
144
- type: accuracy
145
- value: 91.3
146
- verified: true
147
- - task:
148
- type: multimodal
149
- dataset:
150
- type: imagedc
151
- name: ImageDC
152
- metrics:
153
- - name: accuracy
154
- type: accuracy
155
- value: 91.5
156
- verified: true
157
- - task:
158
- type: multimodal
159
- dataset:
160
- type: mmlbench
161
- name: MMLBench
162
- metrics:
163
- - name: accuracy
164
- type: accuracy
165
- value: 84.4
166
- verified: true
167
- - task:
168
- type: multimodal
169
- dataset:
170
- type: realworldqa
171
- name: RealWorldQA
172
- metrics:
173
- - name: accuracy
174
- type: accuracy
175
- value: 73.8
176
- verified: true
177
- - task:
178
- type: multimodal
179
- dataset:
180
- type: vibe-eval
181
- name: Vibe-Eval
182
- metrics:
183
- - name: accuracy
184
- type: accuracy
185
- value: 46.7
186
- verified: true
187
- - task:
188
- type: multimodal
189
- dataset:
190
- type: llava-w
191
- name: LLaVA-W
192
- metrics:
193
- - name: accuracy
194
- type: accuracy
195
- value: 93.7
196
- verified: true
197
- - task:
198
- type: multimodal
199
- dataset:
200
- type: l-wilder
201
- name: LLaVA-Wilder
202
- metrics:
203
- - name: accuracy
204
- type: accuracy
205
- value: 72.9
206
- verified: true
 
207
  ---
208
 
209
  # LLaVA-OneVision
@@ -316,4 +316,4 @@ print(text_outputs)
316
  @article{li2024llavaonevision,
317
  title={LLaVA-OneVision},
318
  }
319
- ```
 
1
  ---
2
  license: apache-2.0
3
  datasets:
4
+ - lmms-lab/LLaVA-OneVision-Data
5
  language:
6
+ - en
7
+ - zh
8
  metrics:
9
+ - accuracy
10
  library_name: transformers
11
  tags:
12
+ - multimodal
 
13
  model-index:
14
+ - name: llava-onevision-qwen-72b-si
15
+ results:
16
+ - task:
17
+ type: multimodal
18
+ dataset:
19
+ type: ai2d
20
+ name: AI2D
21
+ metrics:
22
+ - name: accuracy
23
+ type: accuracy
24
+ value: 85.1
25
+ verified: true
26
+ - task:
27
+ type: multimodal
28
+ dataset:
29
+ type: chartqa
30
+ name: ChartQA
31
+ metrics:
32
+ - name: accuracy
33
+ type: accuracy
34
+ value: 84.9
35
+ verified: true
36
+ - task:
37
+ type: multimodal
38
+ dataset:
39
+ type: docvqa
40
+ name: DocVQA
41
+ metrics:
42
+ - name: accuracy
43
+ type: accuracy
44
+ value: 93.5
45
+ verified: true
46
+ - task:
47
+ type: multimodal
48
+ dataset:
49
+ type: infovqa
50
+ name: InfoVQA
51
+ metrics:
52
+ - name: accuracy
53
+ type: accuracy
54
+ value: 77.7
55
+ verified: true
56
+ - task:
57
+ type: multimodal
58
+ dataset:
59
+ type: mathverse
60
+ name: MathVerse
61
+ metrics:
62
+ - name: accuracy
63
+ type: accuracy
64
+ value: 37.7
65
+ verified: true
66
+ - task:
67
+ type: multimodal
68
+ dataset:
69
+ type: mathvista
70
+ name: MathVista
71
+ metrics:
72
+ - name: accuracy
73
+ type: accuracy
74
+ value: 66.5
75
+ verified: true
76
+ - task:
77
+ type: multimodal
78
+ dataset:
79
+ type: mmbench
80
+ name: MMBench
81
+ metrics:
82
+ - name: accuracy
83
+ type: accuracy
84
+ value: 86.6
85
+ verified: true
86
+ - task:
87
+ type: multimodal
88
+ dataset:
89
+ type: mme
90
+ name: MME
91
+ metrics:
92
+ - name: score
93
+ type: score
94
+ value: 2269
95
+ verified: true
96
+ - task:
97
+ type: multimodal
98
+ dataset:
99
+ type: mmmu
100
+ name: MMMU
101
+ metrics:
102
+ - name: accuracy
103
+ type: accuracy
104
+ value: 57.4
105
+ verified: true
106
+ - task:
107
+ type: multimodal
108
+ dataset:
109
+ type: mmvet
110
+ name: MMVet
111
+ metrics:
112
+ - name: accuracy
113
+ type: accuracy
114
+ value: 60
115
+ verified: true
116
+ - task:
117
+ type: multimodal
118
+ dataset:
119
+ type: mmstar
120
+ name: MMStar
121
+ metrics:
122
+ - name: accuracy
123
+ type: accuracy
124
+ value: 65.2
125
+ verified: true
126
+ - task:
127
+ type: multimodal
128
+ dataset:
129
+ type: seed-bench
130
+ name: Seed-Bench
131
+ metrics:
132
+ - name: accuracy
133
+ type: accuracy
134
+ value: 77.6
135
+ verified: true
136
+ - task:
137
+ type: multimodal
138
+ dataset:
139
+ type: science-qa
140
+ name: Science-QA
141
+ metrics:
142
+ - name: accuracy
143
+ type: accuracy
144
+ value: 91.3
145
+ verified: true
146
+ - task:
147
+ type: multimodal
148
+ dataset:
149
+ type: imagedc
150
+ name: ImageDC
151
+ metrics:
152
+ - name: accuracy
153
+ type: accuracy
154
+ value: 91.5
155
+ verified: true
156
+ - task:
157
+ type: multimodal
158
+ dataset:
159
+ type: mmlbench
160
+ name: MMLBench
161
+ metrics:
162
+ - name: accuracy
163
+ type: accuracy
164
+ value: 84.4
165
+ verified: true
166
+ - task:
167
+ type: multimodal
168
+ dataset:
169
+ type: realworldqa
170
+ name: RealWorldQA
171
+ metrics:
172
+ - name: accuracy
173
+ type: accuracy
174
+ value: 73.8
175
+ verified: true
176
+ - task:
177
+ type: multimodal
178
+ dataset:
179
+ type: vibe-eval
180
+ name: Vibe-Eval
181
+ metrics:
182
+ - name: accuracy
183
+ type: accuracy
184
+ value: 46.7
185
+ verified: true
186
+ - task:
187
+ type: multimodal
188
+ dataset:
189
+ type: llava-w
190
+ name: LLaVA-W
191
+ metrics:
192
+ - name: accuracy
193
+ type: accuracy
194
+ value: 93.7
195
+ verified: true
196
+ - task:
197
+ type: multimodal
198
+ dataset:
199
+ type: l-wilder
200
+ name: LLaVA-Wilder
201
+ metrics:
202
+ - name: accuracy
203
+ type: accuracy
204
+ value: 72.9
205
+ verified: true
206
+ pipeline_tag: image-text-to-text
207
  ---
208
 
209
  # LLaVA-OneVision
 
316
  @article{li2024llavaonevision,
317
  title={LLaVA-OneVision},
318
  }
319
+ ```