sanghol committed (verified)
Commit 2d4f65a · Parent: 97369a1

Upload Molmo2-8B

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,305 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<frame_end>": 151944,
6
+ "<frame_start>": 151943,
7
+ "<im_col>": 151939,
8
+ "<im_end>": 151937,
9
+ "<im_low>": 151942,
10
+ "<im_patch>": 151938,
11
+ "<im_start>": 151936,
12
+ "<low_res_im_start>": 151940,
13
+ "<think>": 151667,
14
+ "<tool_call>": 151657,
15
+ "<tool_response>": 151665,
16
+ "<|box_end|>": 151649,
17
+ "<|box_start|>": 151648,
18
+ "<|endoftext|>": 151643,
19
+ "<|file_sep|>": 151664,
20
+ "<|fim_middle|>": 151660,
21
+ "<|fim_pad|>": 151662,
22
+ "<|fim_prefix|>": 151659,
23
+ "<|fim_suffix|>": 151661,
24
+ "<|im_end|>": 151645,
25
+ "<|im_start|>": 151644,
26
+ "<|image_pad|>": 151655,
27
+ "<|image|>": 151941,
28
+ "<|object_ref_end|>": 151647,
29
+ "<|object_ref_start|>": 151646,
30
+ "<|quad_end|>": 151651,
31
+ "<|quad_start|>": 151650,
32
+ "<|repo_name|>": 151663,
33
+ "<|video_pad|>": 151656,
34
+ "<|video|>": 151945,
35
+ "<|vision_end|>": 151653,
36
+ "<|vision_pad|>": 151654,
37
+ "<|vision_start|>": 151652,
38
+ "|<EXTRA_TOKENS_0>|": 151669,
39
+ "|<EXTRA_TOKENS_100>|": 151769,
40
+ "|<EXTRA_TOKENS_101>|": 151770,
41
+ "|<EXTRA_TOKENS_102>|": 151771,
42
+ "|<EXTRA_TOKENS_103>|": 151772,
43
+ "|<EXTRA_TOKENS_104>|": 151773,
44
+ "|<EXTRA_TOKENS_105>|": 151774,
45
+ "|<EXTRA_TOKENS_106>|": 151775,
46
+ "|<EXTRA_TOKENS_107>|": 151776,
47
+ "|<EXTRA_TOKENS_108>|": 151777,
48
+ "|<EXTRA_TOKENS_109>|": 151778,
49
+ "|<EXTRA_TOKENS_10>|": 151679,
50
+ "|<EXTRA_TOKENS_110>|": 151779,
51
+ "|<EXTRA_TOKENS_111>|": 151780,
52
+ "|<EXTRA_TOKENS_112>|": 151781,
53
+ "|<EXTRA_TOKENS_113>|": 151782,
54
+ "|<EXTRA_TOKENS_114>|": 151783,
55
+ "|<EXTRA_TOKENS_115>|": 151784,
56
+ "|<EXTRA_TOKENS_116>|": 151785,
57
+ "|<EXTRA_TOKENS_117>|": 151786,
58
+ "|<EXTRA_TOKENS_118>|": 151787,
59
+ "|<EXTRA_TOKENS_119>|": 151788,
60
+ "|<EXTRA_TOKENS_11>|": 151680,
61
+ "|<EXTRA_TOKENS_120>|": 151789,
62
+ "|<EXTRA_TOKENS_121>|": 151790,
63
+ "|<EXTRA_TOKENS_122>|": 151791,
64
+ "|<EXTRA_TOKENS_123>|": 151792,
65
+ "|<EXTRA_TOKENS_124>|": 151793,
66
+ "|<EXTRA_TOKENS_125>|": 151794,
67
+ "|<EXTRA_TOKENS_126>|": 151795,
68
+ "|<EXTRA_TOKENS_127>|": 151796,
69
+ "|<EXTRA_TOKENS_128>|": 151797,
70
+ "|<EXTRA_TOKENS_129>|": 151798,
71
+ "|<EXTRA_TOKENS_12>|": 151681,
72
+ "|<EXTRA_TOKENS_130>|": 151799,
73
+ "|<EXTRA_TOKENS_131>|": 151800,
74
+ "|<EXTRA_TOKENS_132>|": 151801,
75
+ "|<EXTRA_TOKENS_133>|": 151802,
76
+ "|<EXTRA_TOKENS_134>|": 151803,
77
+ "|<EXTRA_TOKENS_135>|": 151804,
78
+ "|<EXTRA_TOKENS_136>|": 151805,
79
+ "|<EXTRA_TOKENS_137>|": 151806,
80
+ "|<EXTRA_TOKENS_138>|": 151807,
81
+ "|<EXTRA_TOKENS_139>|": 151808,
82
+ "|<EXTRA_TOKENS_13>|": 151682,
83
+ "|<EXTRA_TOKENS_140>|": 151809,
84
+ "|<EXTRA_TOKENS_141>|": 151810,
85
+ "|<EXTRA_TOKENS_142>|": 151811,
86
+ "|<EXTRA_TOKENS_143>|": 151812,
87
+ "|<EXTRA_TOKENS_144>|": 151813,
88
+ "|<EXTRA_TOKENS_145>|": 151814,
89
+ "|<EXTRA_TOKENS_146>|": 151815,
90
+ "|<EXTRA_TOKENS_147>|": 151816,
91
+ "|<EXTRA_TOKENS_148>|": 151817,
92
+ "|<EXTRA_TOKENS_149>|": 151818,
93
+ "|<EXTRA_TOKENS_14>|": 151683,
94
+ "|<EXTRA_TOKENS_150>|": 151819,
95
+ "|<EXTRA_TOKENS_151>|": 151820,
96
+ "|<EXTRA_TOKENS_152>|": 151821,
97
+ "|<EXTRA_TOKENS_153>|": 151822,
98
+ "|<EXTRA_TOKENS_154>|": 151823,
99
+ "|<EXTRA_TOKENS_155>|": 151824,
100
+ "|<EXTRA_TOKENS_156>|": 151825,
101
+ "|<EXTRA_TOKENS_157>|": 151826,
102
+ "|<EXTRA_TOKENS_158>|": 151827,
103
+ "|<EXTRA_TOKENS_159>|": 151828,
104
+ "|<EXTRA_TOKENS_15>|": 151684,
105
+ "|<EXTRA_TOKENS_160>|": 151829,
106
+ "|<EXTRA_TOKENS_161>|": 151830,
107
+ "|<EXTRA_TOKENS_162>|": 151831,
108
+ "|<EXTRA_TOKENS_163>|": 151832,
109
+ "|<EXTRA_TOKENS_164>|": 151833,
110
+ "|<EXTRA_TOKENS_165>|": 151834,
111
+ "|<EXTRA_TOKENS_166>|": 151835,
112
+ "|<EXTRA_TOKENS_167>|": 151836,
113
+ "|<EXTRA_TOKENS_168>|": 151837,
114
+ "|<EXTRA_TOKENS_169>|": 151838,
115
+ "|<EXTRA_TOKENS_16>|": 151685,
116
+ "|<EXTRA_TOKENS_170>|": 151839,
117
+ "|<EXTRA_TOKENS_171>|": 151840,
118
+ "|<EXTRA_TOKENS_172>|": 151841,
119
+ "|<EXTRA_TOKENS_173>|": 151842,
120
+ "|<EXTRA_TOKENS_174>|": 151843,
121
+ "|<EXTRA_TOKENS_175>|": 151844,
122
+ "|<EXTRA_TOKENS_176>|": 151845,
123
+ "|<EXTRA_TOKENS_177>|": 151846,
124
+ "|<EXTRA_TOKENS_178>|": 151847,
125
+ "|<EXTRA_TOKENS_179>|": 151848,
126
+ "|<EXTRA_TOKENS_17>|": 151686,
127
+ "|<EXTRA_TOKENS_180>|": 151849,
128
+ "|<EXTRA_TOKENS_181>|": 151850,
129
+ "|<EXTRA_TOKENS_182>|": 151851,
130
+ "|<EXTRA_TOKENS_183>|": 151852,
131
+ "|<EXTRA_TOKENS_184>|": 151853,
132
+ "|<EXTRA_TOKENS_185>|": 151854,
133
+ "|<EXTRA_TOKENS_186>|": 151855,
134
+ "|<EXTRA_TOKENS_187>|": 151856,
135
+ "|<EXTRA_TOKENS_188>|": 151857,
136
+ "|<EXTRA_TOKENS_189>|": 151858,
137
+ "|<EXTRA_TOKENS_18>|": 151687,
138
+ "|<EXTRA_TOKENS_190>|": 151859,
139
+ "|<EXTRA_TOKENS_191>|": 151860,
140
+ "|<EXTRA_TOKENS_192>|": 151861,
141
+ "|<EXTRA_TOKENS_193>|": 151862,
142
+ "|<EXTRA_TOKENS_194>|": 151863,
143
+ "|<EXTRA_TOKENS_195>|": 151864,
144
+ "|<EXTRA_TOKENS_196>|": 151865,
145
+ "|<EXTRA_TOKENS_197>|": 151866,
146
+ "|<EXTRA_TOKENS_198>|": 151867,
147
+ "|<EXTRA_TOKENS_199>|": 151868,
148
+ "|<EXTRA_TOKENS_19>|": 151688,
149
+ "|<EXTRA_TOKENS_1>|": 151670,
150
+ "|<EXTRA_TOKENS_200>|": 151869,
151
+ "|<EXTRA_TOKENS_201>|": 151870,
152
+ "|<EXTRA_TOKENS_202>|": 151871,
153
+ "|<EXTRA_TOKENS_203>|": 151872,
154
+ "|<EXTRA_TOKENS_204>|": 151873,
155
+ "|<EXTRA_TOKENS_205>|": 151874,
156
+ "|<EXTRA_TOKENS_206>|": 151875,
157
+ "|<EXTRA_TOKENS_207>|": 151876,
158
+ "|<EXTRA_TOKENS_208>|": 151877,
159
+ "|<EXTRA_TOKENS_209>|": 151878,
160
+ "|<EXTRA_TOKENS_20>|": 151689,
161
+ "|<EXTRA_TOKENS_210>|": 151879,
162
+ "|<EXTRA_TOKENS_211>|": 151880,
163
+ "|<EXTRA_TOKENS_212>|": 151881,
164
+ "|<EXTRA_TOKENS_213>|": 151882,
165
+ "|<EXTRA_TOKENS_214>|": 151883,
166
+ "|<EXTRA_TOKENS_215>|": 151884,
167
+ "|<EXTRA_TOKENS_216>|": 151885,
168
+ "|<EXTRA_TOKENS_217>|": 151886,
169
+ "|<EXTRA_TOKENS_218>|": 151887,
170
+ "|<EXTRA_TOKENS_219>|": 151888,
171
+ "|<EXTRA_TOKENS_21>|": 151690,
172
+ "|<EXTRA_TOKENS_220>|": 151889,
173
+ "|<EXTRA_TOKENS_221>|": 151890,
174
+ "|<EXTRA_TOKENS_222>|": 151891,
175
+ "|<EXTRA_TOKENS_223>|": 151892,
176
+ "|<EXTRA_TOKENS_224>|": 151893,
177
+ "|<EXTRA_TOKENS_225>|": 151894,
178
+ "|<EXTRA_TOKENS_226>|": 151895,
179
+ "|<EXTRA_TOKENS_227>|": 151896,
180
+ "|<EXTRA_TOKENS_228>|": 151897,
181
+ "|<EXTRA_TOKENS_229>|": 151898,
182
+ "|<EXTRA_TOKENS_22>|": 151691,
183
+ "|<EXTRA_TOKENS_230>|": 151899,
184
+ "|<EXTRA_TOKENS_231>|": 151900,
185
+ "|<EXTRA_TOKENS_232>|": 151901,
186
+ "|<EXTRA_TOKENS_233>|": 151902,
187
+ "|<EXTRA_TOKENS_234>|": 151903,
188
+ "|<EXTRA_TOKENS_235>|": 151904,
189
+ "|<EXTRA_TOKENS_236>|": 151905,
190
+ "|<EXTRA_TOKENS_237>|": 151906,
191
+ "|<EXTRA_TOKENS_238>|": 151907,
192
+ "|<EXTRA_TOKENS_239>|": 151908,
193
+ "|<EXTRA_TOKENS_23>|": 151692,
194
+ "|<EXTRA_TOKENS_240>|": 151909,
195
+ "|<EXTRA_TOKENS_241>|": 151910,
196
+ "|<EXTRA_TOKENS_242>|": 151911,
197
+ "|<EXTRA_TOKENS_243>|": 151912,
198
+ "|<EXTRA_TOKENS_244>|": 151913,
199
+ "|<EXTRA_TOKENS_245>|": 151914,
200
+ "|<EXTRA_TOKENS_246>|": 151915,
201
+ "|<EXTRA_TOKENS_247>|": 151916,
202
+ "|<EXTRA_TOKENS_248>|": 151917,
203
+ "|<EXTRA_TOKENS_249>|": 151918,
204
+ "|<EXTRA_TOKENS_24>|": 151693,
205
+ "|<EXTRA_TOKENS_250>|": 151919,
206
+ "|<EXTRA_TOKENS_251>|": 151920,
207
+ "|<EXTRA_TOKENS_252>|": 151921,
208
+ "|<EXTRA_TOKENS_253>|": 151922,
209
+ "|<EXTRA_TOKENS_254>|": 151923,
210
+ "|<EXTRA_TOKENS_255>|": 151924,
211
+ "|<EXTRA_TOKENS_256>|": 151925,
212
+ "|<EXTRA_TOKENS_257>|": 151926,
213
+ "|<EXTRA_TOKENS_258>|": 151927,
214
+ "|<EXTRA_TOKENS_259>|": 151928,
215
+ "|<EXTRA_TOKENS_25>|": 151694,
216
+ "|<EXTRA_TOKENS_260>|": 151929,
217
+ "|<EXTRA_TOKENS_261>|": 151930,
218
+ "|<EXTRA_TOKENS_262>|": 151931,
219
+ "|<EXTRA_TOKENS_263>|": 151932,
220
+ "|<EXTRA_TOKENS_264>|": 151933,
221
+ "|<EXTRA_TOKENS_265>|": 151934,
222
+ "|<EXTRA_TOKENS_266>|": 151935,
223
+ "|<EXTRA_TOKENS_26>|": 151695,
224
+ "|<EXTRA_TOKENS_27>|": 151696,
225
+ "|<EXTRA_TOKENS_28>|": 151697,
226
+ "|<EXTRA_TOKENS_29>|": 151698,
227
+ "|<EXTRA_TOKENS_2>|": 151671,
228
+ "|<EXTRA_TOKENS_30>|": 151699,
229
+ "|<EXTRA_TOKENS_31>|": 151700,
230
+ "|<EXTRA_TOKENS_32>|": 151701,
231
+ "|<EXTRA_TOKENS_33>|": 151702,
232
+ "|<EXTRA_TOKENS_34>|": 151703,
233
+ "|<EXTRA_TOKENS_35>|": 151704,
234
+ "|<EXTRA_TOKENS_36>|": 151705,
235
+ "|<EXTRA_TOKENS_37>|": 151706,
236
+ "|<EXTRA_TOKENS_38>|": 151707,
237
+ "|<EXTRA_TOKENS_39>|": 151708,
238
+ "|<EXTRA_TOKENS_3>|": 151672,
239
+ "|<EXTRA_TOKENS_40>|": 151709,
240
+ "|<EXTRA_TOKENS_41>|": 151710,
241
+ "|<EXTRA_TOKENS_42>|": 151711,
242
+ "|<EXTRA_TOKENS_43>|": 151712,
243
+ "|<EXTRA_TOKENS_44>|": 151713,
244
+ "|<EXTRA_TOKENS_45>|": 151714,
245
+ "|<EXTRA_TOKENS_46>|": 151715,
246
+ "|<EXTRA_TOKENS_47>|": 151716,
247
+ "|<EXTRA_TOKENS_48>|": 151717,
248
+ "|<EXTRA_TOKENS_49>|": 151718,
249
+ "|<EXTRA_TOKENS_4>|": 151673,
250
+ "|<EXTRA_TOKENS_50>|": 151719,
251
+ "|<EXTRA_TOKENS_51>|": 151720,
252
+ "|<EXTRA_TOKENS_52>|": 151721,
253
+ "|<EXTRA_TOKENS_53>|": 151722,
254
+ "|<EXTRA_TOKENS_54>|": 151723,
255
+ "|<EXTRA_TOKENS_55>|": 151724,
256
+ "|<EXTRA_TOKENS_56>|": 151725,
257
+ "|<EXTRA_TOKENS_57>|": 151726,
258
+ "|<EXTRA_TOKENS_58>|": 151727,
259
+ "|<EXTRA_TOKENS_59>|": 151728,
260
+ "|<EXTRA_TOKENS_5>|": 151674,
261
+ "|<EXTRA_TOKENS_60>|": 151729,
262
+ "|<EXTRA_TOKENS_61>|": 151730,
263
+ "|<EXTRA_TOKENS_62>|": 151731,
264
+ "|<EXTRA_TOKENS_63>|": 151732,
265
+ "|<EXTRA_TOKENS_64>|": 151733,
266
+ "|<EXTRA_TOKENS_65>|": 151734,
267
+ "|<EXTRA_TOKENS_66>|": 151735,
268
+ "|<EXTRA_TOKENS_67>|": 151736,
269
+ "|<EXTRA_TOKENS_68>|": 151737,
270
+ "|<EXTRA_TOKENS_69>|": 151738,
271
+ "|<EXTRA_TOKENS_6>|": 151675,
272
+ "|<EXTRA_TOKENS_70>|": 151739,
273
+ "|<EXTRA_TOKENS_71>|": 151740,
274
+ "|<EXTRA_TOKENS_72>|": 151741,
275
+ "|<EXTRA_TOKENS_73>|": 151742,
276
+ "|<EXTRA_TOKENS_74>|": 151743,
277
+ "|<EXTRA_TOKENS_75>|": 151744,
278
+ "|<EXTRA_TOKENS_76>|": 151745,
279
+ "|<EXTRA_TOKENS_77>|": 151746,
280
+ "|<EXTRA_TOKENS_78>|": 151747,
281
+ "|<EXTRA_TOKENS_79>|": 151748,
282
+ "|<EXTRA_TOKENS_7>|": 151676,
283
+ "|<EXTRA_TOKENS_80>|": 151749,
284
+ "|<EXTRA_TOKENS_81>|": 151750,
285
+ "|<EXTRA_TOKENS_82>|": 151751,
286
+ "|<EXTRA_TOKENS_83>|": 151752,
287
+ "|<EXTRA_TOKENS_84>|": 151753,
288
+ "|<EXTRA_TOKENS_85>|": 151754,
289
+ "|<EXTRA_TOKENS_86>|": 151755,
290
+ "|<EXTRA_TOKENS_87>|": 151756,
291
+ "|<EXTRA_TOKENS_88>|": 151757,
292
+ "|<EXTRA_TOKENS_89>|": 151758,
293
+ "|<EXTRA_TOKENS_8>|": 151677,
294
+ "|<EXTRA_TOKENS_90>|": 151759,
295
+ "|<EXTRA_TOKENS_91>|": 151760,
296
+ "|<EXTRA_TOKENS_92>|": 151761,
297
+ "|<EXTRA_TOKENS_93>|": 151762,
298
+ "|<EXTRA_TOKENS_94>|": 151763,
299
+ "|<EXTRA_TOKENS_95>|": 151764,
300
+ "|<EXTRA_TOKENS_96>|": 151765,
301
+ "|<EXTRA_TOKENS_97>|": 151766,
302
+ "|<EXTRA_TOKENS_98>|": 151767,
303
+ "|<EXTRA_TOKENS_99>|": 151768,
304
+ "|<EXTRA_TOKENS_9>|": 151678
305
+ }
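The mapping above covers chat/tool special tokens (151643-151668), EXTRA_TOKENS placeholders (151669-151935), and the Molmo2 vision tokens (151936-151945). A minimal sketch of checking these IDs against a downloaded copy of the repo; the local path and the trust_remote_code flag are assumptions, not part of this commit:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./Molmo2-8B", trust_remote_code=True)  # assumed local checkout
print(tokenizer.convert_tokens_to_ids("<|image|>"))   # expected 151941, per added_tokens.json
print(tokenizer.convert_tokens_to_ids("<im_start>"))  # expected 151936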
chat_template.jinja ADDED
@@ -0,0 +1 @@
1
+ {% set DEMO_STYLES = ['point_count','pointing','cosyn_point','user_qa','long_caption','short_caption','video_long_caption','video_short_caption','video_point_track_per_frame','video_point_track_start_end','video_point_track_all_frames','video_single_point_track_start_end','video_transcript','video_clip_caption_start_end','video_clip_caption_start_end_in_seconds','video_clip_transcript_start_end','video_clip_transcript_start_end_in_seconds','video_frame_caption_timestamp','video_frame_caption_timestamp_in_seconds','correction_qa','text_sft','video_point','video_point_count','video_count','video_count_point','multi_image_pointing','multi_image_counting','multi_image_point_then_count','multi_image_count_then_point','demo','a_okvqa_mc','ai2_diagram_no_letter','ai2_diagram','science_qa','multi_image_mc','multi_image_mc_exp','mantis_instruct_mc','video_multiple_choice','video_multiple_choice_count_without_pointing','video_multiple_choice_multiple_correct','video_multiple_choice_w_subtitle'] %}{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% set has_subtitle = messages and messages[0]['role'].lower() == 'subtitle' %}{% for message in messages %}{% if message['content'] is not string %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% elif content['type'] == 'video' or 'video' in content or 'video_url' in content %}{% set video_count.value = video_count.value + 1 %}{% endif %}{% endfor %}{% endif %}{% endfor %}{% if image_count.value == 1 %}{{ '<|image|>' }}{% elif image_count.value > 1 %}{% for i in range(image_count.value) %}{{ 'Image ' ~ (i + 1) ~ '<|image|>' }}{% endfor %}{% endif %}{% for _ in range(video_count.value) %}{{ '<|video|>' }}{% endfor %}{% if has_subtitle %}{{ messages[0]['content'] }}{% endif %}{% for message in messages %}{% set role = message['role'].lower() %}{% if role == 'subtitle' %}{% continue %}{% endif %}{% set conv_index = loop.index - (1 if has_subtitle else 0) %}{%- if (conv_index % 2 == 1 and role != 'user') or (conv_index % 2 == 0 and role != 'assistant') -%}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{%- endif -%}{% if message['content'] is string %}{% set text_content = message['content'] %}{% else %}{% set m = namespace(text='') %}{% for content in message['content'] %}{% if content['type'] == 'text' %}{% if content['style'] is defined and content['style'] not in DEMO_STYLES %}{% set seg = content['style'] ~ ': ' ~ content['text'] %}{% else %}{% set seg = content['text'] %}{% endif %}{% set m.text = m.text ~ ('' if not m.text else ' ') ~ seg %}{% endif %}{% endfor %}{% set text_content = m.text %}{% endif %}{% if role == 'user' %}{% if not (has_subtitle and loop.index == 2) and not (not has_subtitle and loop.first) %}{{ '<|im_end|>\n' }}{% endif %}{{ '<|im_start|>user\n' }}{{ text_content }}{{ '<|im_end|>\n' }}{% else %} {# assistant #}{{ '<|im_start|>assistant\n' }}{{ text_content }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}
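The template emits all image placeholders up front ("Image N<|image|>" when there is more than one image), then alternating <|im_start|>user / <|im_start|>assistant turns, with an optional leading subtitle message for videos. A minimal sketch of exercising it; the checkout path and message layout are assumptions:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./Molmo2-8B", trust_remote_code=True)  # assumed local checkout
messages = [
    {"role": "user", "content": [
        {"type": "image", "image": "frame.png"},   # illustrative file name
        {"type": "text", "text": "Point to the dog."},
    ]},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
# Expected: "<|image|><|im_start|>user\nPoint to the dog.<|im_end|>\n<|im_start|>assistant\n"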
config.json ADDED
@@ -0,0 +1,95 @@
1
+ {
2
+ "adapter_config": {
3
+ "attention_dropout": 0.0,
4
+ "attn_implementation": "sdpa",
5
+ "float32_attention": true,
6
+ "head_dim": 72,
7
+ "hidden_act": "silu",
8
+ "hidden_size": 1152,
9
+ "image_feature_dropout": 0.0,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 12288,
12
+ "model_type": "molmo2",
13
+ "num_attention_heads": 16,
14
+ "num_key_value_heads": 16,
15
+ "pooling_attention_mask": true,
16
+ "residual_dropout": 0.0,
17
+ "text_hidden_size": 4096,
18
+ "vit_layers": [
19
+ -3,
20
+ -9
21
+ ]
22
+ },
23
+ "architectures": [
24
+ "Molmo2ForConditionalGeneration"
25
+ ],
26
+ "auto_map": {
27
+ "AutoConfig": "configuration_molmo2.Molmo2Config",
28
+ "AutoModelForImageTextToText": "modeling_molmo2.Molmo2ForConditionalGeneration"
29
+ },
30
+ "dtype": "float32",
31
+ "frame_end_token_id": 151944,
32
+ "frame_start_token_id": 151943,
33
+ "image_col_id": 151939,
34
+ "image_end_token_id": 151937,
35
+ "image_high_res_id": 151938,
36
+ "image_low_res_id": 151942,
37
+ "image_patch_id": 151938,
38
+ "image_start_token_id": 151936,
39
+ "initializer_range": 0.02,
40
+ "low_res_image_start_token_id": 151940,
41
+ "model_type": "molmo2",
42
+ "text_config": {
43
+ "additional_vocab_size": 128,
44
+ "attention_dropout": 0.0,
45
+ "attn_implementation": "sdpa",
46
+ "embedding_dropout": 0.0,
47
+ "head_dim": 128,
48
+ "hidden_act": "silu",
49
+ "hidden_size": 4096,
50
+ "initializer_range": 0.02,
51
+ "intermediate_size": 12288,
52
+ "layer_norm_eps": 1e-06,
53
+ "max_position_embeddings": 36864,
54
+ "model_type": "molmo2_text",
55
+ "norm_after": false,
56
+ "num_attention_heads": 32,
57
+ "num_hidden_layers": 36,
58
+ "num_key_value_heads": 8,
59
+ "qk_norm_type": "qwen3",
60
+ "qkv_bias": false,
61
+ "residual_dropout": 0.0,
62
+ "rope_scaling": null,
63
+ "rope_scaling_layers": null,
64
+ "rope_theta": 1000000.0,
65
+ "use_cache": true,
66
+ "use_qk_norm": true,
67
+ "vocab_size": 151936
68
+ },
69
+ "tie_word_embeddings": false,
70
+ "transformers_version": "4.57.1",
71
+ "use_cache": true,
72
+ "use_frame_special_tokens": false,
73
+ "vit_config": {
74
+ "attention_dropout": 0.0,
75
+ "attn_implementation": "sdpa",
76
+ "float32_attention": true,
77
+ "head_dim": 72,
78
+ "hidden_act": "gelu_pytorch_tanh",
79
+ "hidden_size": 1152,
80
+ "image_default_input_size": [
81
+ 378,
82
+ 378
83
+ ],
84
+ "image_num_pos": 729,
85
+ "image_patch_size": 14,
86
+ "initializer_range": 0.02,
87
+ "intermediate_size": 4304,
88
+ "layer_norm_eps": 1e-06,
89
+ "model_type": "molmo2",
90
+ "num_attention_heads": 16,
91
+ "num_hidden_layers": 27,
92
+ "num_key_value_heads": 16,
93
+ "residual_dropout": 0.0
94
+ }
95
+ }
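A short sketch of loading this config through the auto_map entries above; the local path is an assumption:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("./Molmo2-8B", trust_remote_code=True)  # assumed local checkout
print(config.model_type)                           # "molmo2"
print(config.text_config.num_hidden_layers)        # 36, per the text_config block above
print(config.vit_config.image_default_input_size)  # [378, 378]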
configuration_molmo2.py ADDED
@@ -0,0 +1,391 @@
1
+ """
2
+ Molmo2 configuration
3
+ """
4
+
5
+ from typing import Optional, Any
6
+
7
+ from transformers import PretrainedConfig
8
+ from transformers.modeling_rope_utils import rope_config_validation
9
+ from transformers.utils import logging
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
+ class Molmo2VitConfig(PretrainedConfig):
15
+ r"""
16
+ This is the configuration class to store the configuration of a [`Molmo2VisionTransformer`].
17
+ It is used to instantiate a `Molmo2VisionTransformer` according to the specified arguments,
18
+ defining the model architecture.
19
+
20
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
21
+ documentation from [`PretrainedConfig`] for more information.
22
+
23
+ Example:
24
+ ```python
25
+ >>> from transformers import Molmo2VitConfig, Molmo2VisionTransformer
26
+
27
+ >>> # Initializing a Molmo2VitConfig
28
+ >>> configuration = Molmo2VitConfig()
29
+
30
+ >>> # Initializing a Molmo2VisionTransformer (with random weights)
31
+ >>> model = Molmo2VisionTransformer(configuration)
32
+
33
+ >>> # Accessing the model configuration
34
+ >>> configuration = model.config
35
+ ```"""
36
+
37
+ model_type = "molmo2"
38
+ base_config_key = "vit_config"
39
+
40
+ def __init__(
41
+ self,
42
+ hidden_size: int = 1152,
43
+ intermediate_size: int = 4304,
44
+ num_hidden_layers: int = 27,
45
+ num_attention_heads: int = 16,
46
+ num_key_value_heads: int = 16,
47
+ head_dim: int = 72,
48
+ hidden_act: str = "gelu_pytorch_tanh",
49
+ layer_norm_eps: float = 1e-6,
50
+ image_default_input_size: tuple[int, int] = (378, 378),
51
+ image_patch_size: int = 14,
52
+ image_num_pos: int = 577,
53
+ attention_dropout: float = 0.0,
54
+ residual_dropout: float = 0.0,
55
+ initializer_range: float = 0.02,
56
+ float32_attention: bool = True,
57
+ attn_implementation: str = "eager",
58
+ **kwargs,
59
+ ):
60
+ self.attn_implementation = attn_implementation
61
+ super().__init__(
62
+ attn_implementation=attn_implementation,
63
+ **kwargs
64
+ )
65
+ self.hidden_size = hidden_size
66
+ self.intermediate_size = intermediate_size
67
+ self.num_hidden_layers = num_hidden_layers
68
+ self.num_attention_heads = num_attention_heads
69
+ self.num_key_value_heads = num_key_value_heads
70
+ self.head_dim = head_dim
71
+ self.hidden_act = hidden_act
72
+ self.layer_norm_eps = layer_norm_eps
73
+ self.image_default_input_size = image_default_input_size
74
+ self.image_patch_size = image_patch_size
75
+ self.image_num_pos = image_num_pos
76
+ self.attention_dropout = attention_dropout
77
+ self.residual_dropout = residual_dropout
78
+ self.initializer_range = initializer_range
79
+ self.float32_attention = float32_attention
80
+
81
+ @property
82
+ def image_num_patch(self):
83
+ h, w = self.image_default_input_size
84
+ return h // self.image_patch_size, w // self.image_patch_size
85
+
86
+
87
+ class Molmo2AdapterConfig(PretrainedConfig):
88
+ r"""
89
+ This is the configuration class to store the configuration of Molmo2Adapter. With Molmo2VitConfig,
90
+ it is used to instantiate a Molmo2VisionBackbone according to the specified arguments,
91
+ defining the model architecture.
92
+
93
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
94
+ documentation from [`PretrainedConfig`] for more information.
95
+
96
+ Example:
97
+
98
+ ```python
99
+ >>> from transformers import Molmo2VitConfig, Molmo2AdapterConfig, Molmo2VisionBackbone
100
+
101
+ >>> # Initializing a Molmo2VitConfig and a Molmo2AdapterConfig
102
+ >>> vit_config = Molmo2VitConfig()
103
+ >>> adapter_config = Molmo2AdapterConfig()
104
+
105
+ >>> # Initializing a Molmo2VisionBackbone (with random weights)
106
+ >>> model = Molmo2VisionBackbone(vit_config, adapter_config)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> vit_configuration = model.vit_config
110
+ >>> adapter_configuration = model.adapter_config
111
+ ```"""
112
+
113
+ model_type = "molmo2"
114
+ base_config_key = "adapter_config"
115
+
116
+ def __init__(
117
+ self,
118
+ vit_layers: tuple = (-3, -9),
119
+ pooling_attention_mask: bool = False,
120
+ hidden_size: int = 1152,
121
+ num_attention_heads: int = 16,
122
+ num_key_value_heads: int = 16,
123
+ head_dim: int = 72,
124
+ float32_attention: bool = True,
125
+ attention_dropout: float = 0.0,
126
+ residual_dropout: float = 0.0,
127
+ hidden_act: str = "silu",
128
+ intermediate_size: int = 18944,
129
+ text_hidden_size: int = 3584,
130
+ image_feature_dropout: float = 0.0,
131
+ initializer_range: float = 0.02,
132
+ attn_implementation: str = "eager",
133
+ **kwargs,
134
+ ):
135
+ self.attn_implementation = attn_implementation
136
+ super().__init__(
137
+ attn_implementation=attn_implementation,
138
+ **kwargs
139
+ )
140
+ self.vit_layers = vit_layers
141
+ self.pooling_attention_mask = pooling_attention_mask
142
+ self.hidden_size = hidden_size
143
+ self.num_attention_heads = num_attention_heads
144
+ self.num_key_value_heads = num_key_value_heads
145
+ self.head_dim = head_dim
146
+ self.float32_attention = float32_attention
147
+ self.attention_dropout = attention_dropout
148
+ self.residual_dropout = residual_dropout
149
+ self.hidden_act = hidden_act
150
+ self.intermediate_size = intermediate_size
151
+ self.text_hidden_size = text_hidden_size
152
+ self.image_feature_dropout = image_feature_dropout
153
+ self.initializer_range = initializer_range
154
+
155
+
156
+ class Molmo2TextConfig(PretrainedConfig):
157
+ r"""
158
+ This is the configuration class to store the configuration of a [`Molmo2TextModel`]. It is used to instantiate a
159
+ `Molmo2TextModel` according to the specified arguments, defining the model architecture.
160
+
161
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
162
+ documentation from [`PretrainedConfig`] for more information.
163
+
164
+ Example:
165
+ ```python
166
+ >>> from transformers import Molmo2TextConfig, Molmo2TextModel
167
+
168
+ >>> # Initializing a Molmo2TextConfig
169
+ >>> configuration = Molmo2TextConfig()
170
+
171
+ >>> # Initializing a Molmo2TextModel (with random weights)
172
+ >>> model = Molmo2TextModel(configuration)
173
+
174
+ >>> # Accessing the model configuration
175
+ >>> configuration = model.config
176
+ ```"""
177
+
178
+ model_type = "molmo2_text"
179
+ base_config_key = "text_config"
180
+ keys_to_ignore_at_inference = ["past_key_values"]
181
+ base_model_tp_plan = {
182
+ "blocks.*.self_attn.att_proj": "colwise",
183
+ "blocks.*.self_attn.attn_out": "rowwise",
184
+ "blocks.*.mlp.ff_proj": "colwise",
185
+ "blocks.*.mlp.ff_out": "rowwise",
186
+ }
187
+ base_model_pp_plan = {
188
+ "wte": (["input_ids"], ["inputs_embeds"]),
189
+ "blocks": (["hidden_states", "attention_mask"], ["hidden_states"]),
190
+ "ln_f": (["hidden_states"], ["hidden_states"]),
191
+ }
192
+
193
+ def __init__(
194
+ self,
195
+ hidden_size: int = 3584,
196
+ num_attention_heads: int = 28,
197
+ num_key_value_heads: Optional[int] = 4,
198
+ head_dim: int = 128,
199
+ vocab_size: int = 152064,
200
+ additional_vocab_size: int = 128,
201
+ qkv_bias: bool = True,
202
+ num_hidden_layers: int = 48,
203
+ intermediate_size: int = 18944,
204
+ hidden_act: str = "silu",
205
+ embedding_dropout: float=0.0,
206
+ attention_dropout: float=0.0,
207
+ residual_dropout: float = 0.0,
208
+ max_position_embeddings: int = 4096,
209
+ rope_theta: float = 1000000.0,
210
+ rope_scaling: dict[str, Any] = None,
211
+ rope_scaling_layers: Optional[list[int]] = None,
212
+ use_qk_norm: bool = False,
213
+ qk_norm_type: str = "olmo",
214
+ layer_norm_eps: float = 1e-6,
215
+ norm_after: bool = False,
216
+ initializer_range: float = 0.02,
217
+ use_cache=True,
218
+ tie_word_embeddings=False,
219
+ attn_implementation: str = "eager",
220
+ **kwargs,
221
+ ):
222
+ self.attn_implementation = attn_implementation
223
+ super().__init__(
224
+ tie_word_embeddings=tie_word_embeddings,
225
+ attn_implementation=attn_implementation,
226
+ **kwargs
227
+ )
228
+ self.hidden_size = hidden_size
229
+ self.num_attention_heads = num_attention_heads
230
+ if num_key_value_heads is None:
231
+ num_key_value_heads = num_attention_heads
232
+ self.num_key_value_heads = num_key_value_heads
233
+ self.head_dim = head_dim
234
+ self.vocab_size = vocab_size
235
+ self.additional_vocab_size = additional_vocab_size
236
+ self.qkv_bias = qkv_bias
237
+ self.num_hidden_layers = num_hidden_layers
238
+ self.intermediate_size = intermediate_size
239
+ self.hidden_act = hidden_act
240
+ self.embedding_dropout = embedding_dropout
241
+ self.attention_dropout = attention_dropout
242
+ self.residual_dropout = residual_dropout
243
+ self.max_position_embeddings = max_position_embeddings
244
+ self.rope_theta = rope_theta
245
+ self.rope_scaling = rope_scaling
246
+ self.rope_scaling_layers = rope_scaling_layers
247
+ self.use_qk_norm = use_qk_norm
248
+ self.qk_norm_type = qk_norm_type
249
+ self.layer_norm_eps = layer_norm_eps
250
+ self.norm_after = norm_after
251
+ self.initializer_range = initializer_range
252
+ self.use_cache = use_cache
253
+
254
+ # Validate the correctness of rotary position embeddings parameters
255
+ rope_config_validation(self)
256
+
257
+
258
+ class Molmo2Config(PretrainedConfig):
259
+ r"""
260
+ This is the configuration class to store the configuration of a [`Molmo2ForConditionalGeneration`].
261
+ It is used to instantiate a Molmo2 model according to the specified arguments, defining the model architecture.
262
+
263
+ Example:
264
+
265
+ ```python
266
+ >>> from transformers import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
267
+
268
+ >>> # Initializing a Molmo2VitConfig
269
+ >>> vit_config = Molmo2VitConfig()
270
+
271
+ >>> # Initializing a Molmo2AdapterConfig
272
+ >>> adapter_config = Molmo2AdapterConfig()
273
+
274
+ >>> # Initializing a Molmo2TextConfig
275
+ >>> text_config = Molmo2TextConfig()
276
+
277
+ >>> # Initializing a Molmo2Config
278
+ >>> configuration = Molmo2Config(
279
+ >>> vit_config=vit_config,
280
+ >>> adapter_config=adapter_config,
281
+ >>> text_config=text_config,
282
+ >>> image_start_token_id=151936,
283
+ >>> image_end_token_id=151937,
284
+ >>> image_patch_id=151938,
285
+ >>> image_col_id=151939,
286
+ >>> low_res_image_start_token_id=151940,
287
+ >>> image_low_res_id=151942,
288
+ >>> frame_start_token_id=151943,
289
+ >>> frame_end_token_id=151944,
290
+ >>> )
291
+
292
+ >>> # Initializing a model
293
+ >>> model = Molmo2ForConditionalGeneration(configuration)
294
+
295
+ >>> # Accessing the model configuration
296
+ >>> configuration = model.config
297
+ ```"""
298
+
299
+ model_type = "molmo2"
300
+ sub_configs = {
301
+ "text_config": Molmo2TextConfig,
302
+ "vit_config": Molmo2VitConfig,
303
+ "adapter_config": Molmo2AdapterConfig,
304
+ }
305
+
306
+ def __init__(
307
+ self,
308
+ vit_config: Molmo2VitConfig = None,
309
+ adapter_config: Molmo2AdapterConfig = None,
310
+ text_config: Molmo2TextConfig = None,
311
+ image_start_token_id: int = None,
312
+ low_res_image_start_token_id: int = None,
313
+ image_end_token_id: int = None,
314
+ image_low_res_id: int = None,
315
+ image_patch_id: int = None,
316
+ image_col_id: int = None,
317
+ frame_start_token_id: int = None,
318
+ frame_end_token_id: int = None,
319
+ use_frame_special_tokens: bool = True,
320
+ initializer_range: float = 0.02,
321
+ **kwargs,
322
+ ):
323
+ super().__init__(**kwargs)
324
+ if vit_config is None:
325
+ self.vit_config = Molmo2VitConfig()
326
+ elif isinstance(vit_config, dict):
327
+ self.vit_config = Molmo2VitConfig(**vit_config)
328
+ else:
329
+ self.vit_config = vit_config
330
+ if adapter_config is None:
331
+ self.adapter_config = Molmo2AdapterConfig()
332
+ elif isinstance(adapter_config, dict):
333
+ self.adapter_config = Molmo2AdapterConfig(**adapter_config)
334
+ else:
335
+ self.adapter_config = adapter_config
336
+ if text_config is None:
337
+ self.text_config = Molmo2TextConfig()
338
+ elif isinstance(text_config, dict):
339
+ self.text_config = Molmo2TextConfig(**text_config)
340
+ else:
341
+ self.text_config = text_config
342
+ self.image_start_token_id = image_start_token_id
343
+ self.low_res_image_start_token_id = low_res_image_start_token_id
344
+ self.image_end_token_id = image_end_token_id
345
+ self.image_low_res_id = image_low_res_id
346
+ self.image_high_res_id = image_patch_id
347
+ self.image_patch_id = image_patch_id
348
+ self.image_col_id = image_col_id
349
+ self.frame_start_token_id = frame_start_token_id
350
+ self.frame_end_token_id = frame_end_token_id
351
+ self.use_frame_special_tokens = use_frame_special_tokens
352
+ self.initializer_range = initializer_range
353
+
354
+ @property
355
+ def image_num_patch(self):
356
+ assert self.vit_config is not None
357
+ return self.vit_config.image_num_patch
358
+
359
+ @property
360
+ def num_attention_heads(self):
361
+ return self.text_config.num_attention_heads
362
+
363
+ @property
364
+ def num_key_value_heads(self):
365
+ return self.text_config.num_key_value_heads
366
+
367
+ @property
368
+ def head_dim(self):
369
+ return self.text_config.head_dim
370
+
371
+ @property
372
+ def num_hidden_layers(self):
373
+ return self.text_config.num_hidden_layers
374
+
375
+ @property
376
+ def hidden_size(self):
377
+ return self.text_config.hidden_size
378
+
379
+ @property
380
+ def vocab_size(self):
381
+ return self.text_config.vocab_size
382
+
383
+ @property
384
+ def max_position_embeddings(self):
385
+ return self.text_config.max_position_embeddings
386
+
387
+
388
+ Molmo2VitConfig.register_for_auto_class()
389
+ Molmo2AdapterConfig.register_for_auto_class()
390
+ Molmo2TextConfig.register_for_auto_class()
391
+ Molmo2Config.register_for_auto_class()
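A small sketch of the derived properties defined above, assuming configuration_molmo2.py is importable from a local checkout (the values follow the class defaults, not the shipped config.json):

from configuration_molmo2 import Molmo2VitConfig, Molmo2Config

vit = Molmo2VitConfig(image_default_input_size=(378, 378), image_patch_size=14)
print(vit.image_num_patch)    # (27, 27): 378 // 14 patches per side
cfg = Molmo2Config(vit_config=vit)
print(cfg.num_hidden_layers)  # 48, forwarded from the default Molmo2TextConfig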
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "bos_token_id": 151645,
3
+ "eos_token_id": 151645,
4
+ "pad_token_id": 151643,
5
+ "transformers_version": "4.57.1"
6
+ }
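Cross-referencing added_tokens.json above: 151645 is <|im_end|> (serving as both bos and eos here) and 151643 is <|endoftext|> (the pad token). A quick consistency check, assuming a local checkout:

import json

gen_cfg = json.load(open("generation_config.json"))
added = json.load(open("added_tokens.json"))
assert added["<|im_end|>"] == gen_cfg["eos_token_id"] == 151645
assert added["<|endoftext|>"] == gen_cfg["pad_token_id"] == 151643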
image_processing_molmo2.py ADDED
@@ -0,0 +1,515 @@
1
+ """Image processor class for Molmo2"""
2
+ from typing import Optional, Union
3
+ import numpy as np
4
+ import einops
5
+ import torch
6
+ import torchvision.transforms
7
+
8
+ from transformers.image_utils import (
9
+ IMAGENET_STANDARD_MEAN,
10
+ IMAGENET_STANDARD_STD,
11
+ ImageInput,
12
+ PILImageResampling,
13
+ make_flat_list_of_images,
14
+ valid_images,
15
+ to_numpy_array,
16
+ )
17
+ from transformers.image_transforms import convert_to_rgb
18
+ from transformers.processing_utils import ImagesKwargs
19
+ from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
20
+ from transformers.utils import logging
21
+ from transformers.feature_extraction_utils import BatchFeature
22
+ from transformers.utils import TensorType
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ def normalize_image(
29
+ image: np.ndarray,
30
+ image_mean: list[float],
31
+ image_std: list[float],
32
+ ) -> np.ndarray:
33
+ image -= np.array(image_mean, dtype=np.float32)[None, None, :]
34
+ image /= np.array(image_std, dtype=np.float32)[None, None, :]
35
+ return image
36
+
37
+
38
+ def resize_image(
39
+ image: np.ndarray,
40
+ desired_output_size: list[int],
41
+ resample: PILImageResampling,
42
+ ) -> np.ndarray:
43
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
44
+ dtype = image.dtype
45
+ if torch.is_floating_point(image):
46
+ in_min = 0.0
47
+ in_max = 1.0
48
+ resized = torchvision.transforms.Resize(
49
+ desired_output_size,
50
+ resample,
51
+ antialias=False,
52
+ )(image)
53
+ resized = torch.clip(resized, 0.0, 1.0).to(dtype)
54
+ else:
55
+ assert image.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(image.dtype)
56
+ in_min = 0.0
57
+ in_max = 255.0
58
+ resized = torchvision.transforms.Resize(
59
+ desired_output_size,
60
+ resample,
61
+ antialias=False,
62
+ )(image)
63
+ resized = torch.clip(resized, 0, 255).to(dtype)
64
+
65
+ resized = resized.to(torch.float32)
66
+ resized = (resized - in_min) / (in_max - in_min)
67
+
68
+ resized = torch.permute(resized, [1, 2, 0]).numpy()
69
+
70
+ return resized
71
+
72
+
73
+ def select_tiling(h, w, patch_size, max_num_crops):
74
+ """Divide in image of size [w, h] in up to max_num_patches of size patch_size"""
75
+ original_size = np.stack([h, w]) # [1, 2]
76
+ original_res = h * w
77
+ tilings = []
78
+ for i in range(1, max_num_crops + 1):
79
+ for j in range(1, max_num_crops + 1):
80
+ if i*j <= max_num_crops:
81
+ tilings.append((i, j))
82
+ # sort so argmin and argmax favour smaller tilings in the event of a tie
83
+ tilings.sort(key=lambda x: (x[0]*x[1], x[0]))
84
+ candidate_tilings = np.array(tilings, dtype=np.int32) # [n_resolutions, 2]
85
+ candidate_resolutions = candidate_tilings * patch_size # [n_resolutions, 2]
86
+
87
+ # How much we would need to scale the image to fit exactly in each tiling
88
+ original_size = np.stack([h, w], dtype=np.float32) # [1, 2]
89
+
90
+ # The original size can be zero in rare cases if the image is smaller than the margin
91
+ # In those cases letting the scale become infinite means the tiling is based on the
92
+ # other side, or falls back to the smallest tiling
93
+ with np.errstate(divide='ignore'):
94
+ required_scale_d = candidate_resolutions.astype(np.float32) / original_size
95
+ required_scale = np.min(required_scale_d, axis=-1, keepdims=True) # [n_resolutions, 1]
96
+ if np.all(required_scale < 1):
97
+ # We are forced to downscale, so try to minimize the amount of downscaling
98
+ ix = np.argmax(required_scale)
99
+ else:
100
+ # Pick the resolution that required the least upscaling so that it most closely fits the image
101
+ required_scale = np.where(required_scale < 1.0, 10e9, required_scale)
102
+ ix = np.argmin(required_scale)
103
+ return candidate_tilings[ix]
104
+
105
+
106
+ def build_resized_image(
107
+ image: np.ndarray,
108
+ base_image_input_size: list[int],
109
+ resample: PILImageResampling,
110
+ image_mean: list[float],
111
+ image_std: list[float],
112
+ image_patch_size: int,
113
+ ) -> tuple[np.ndarray, np.ndarray]:
114
+ resized = resize_image(
115
+ image, base_image_input_size, resample,
116
+ )
117
+ resized = normalize_image(resized, image_mean, image_std)
118
+ if len(resized.shape) == 3:
119
+ resized = np.expand_dims(resized, 0)
120
+ crop_patch_w = base_image_input_size[1] // image_patch_size
121
+ crop_patch_h = base_image_input_size[0] // image_patch_size
122
+ resize_idx = np.arange(crop_patch_w*crop_patch_h).reshape([crop_patch_h, crop_patch_w])
123
+ return resized, resize_idx
124
+
125
+
126
+ def build_overlapping_crops(
127
+ image: np.ndarray,
128
+ max_crops: int,
129
+ overlap_margins: list[int],
130
+ base_image_input_size: list[int],
131
+ resample: PILImageResampling,
132
+ image_mean: list[float],
133
+ image_std: list[float],
134
+ image_patch_size: int,
135
+ ) -> tuple[np.ndarray, np.ndarray]:
136
+ """Decompose an image into a set of overlapping crops
137
+
138
+ :return crop_arr: [n_crops, h, w, 3] The crops
139
+ :return patch_idx: [overlap_patch_h, overlap_patch_w] For each patch in the resized image
140
+ the crops were extracted from, what patch in `crop_arr` it corresponds to
141
+ """
142
+ original_image_h, original_image_w = image.shape[:2]
143
+ crop_size = base_image_input_size[0]
144
+ assert base_image_input_size[0] == base_image_input_size[1]
145
+
146
+ left_margin, right_margin = overlap_margins
147
+ total_margin_pixels = image_patch_size * (right_margin + left_margin) # pixels removed per dim
148
+ crop_patches = base_image_input_size[0] // image_patch_size # patches per crop dim
149
+ crop_window_patches = crop_patches - (right_margin + left_margin) # usable patches
150
+ crop_window_size = crop_window_patches * image_patch_size
151
+ crop_patch_w = base_image_input_size[1] // image_patch_size
152
+ crop_patch_h = base_image_input_size[0] // image_patch_size
153
+ original_image_h, original_image_w = image.shape[:2]
154
+ crop_size = base_image_input_size[0]
155
+
156
+ # Decide how to tile the image, to account for the overlap margins we compute the tiling
157
+ # as if we had an image without the margins and were using a crop size without the margins
158
+ tiling = select_tiling(
159
+ original_image_h - total_margin_pixels,
160
+ original_image_w - total_margin_pixels,
161
+ crop_window_size,
162
+ max_crops,
163
+ )
164
+
165
+ src = resize_image(
166
+ image,
167
+ [tiling[0]*crop_window_size+total_margin_pixels, tiling[1]*crop_window_size+total_margin_pixels],
168
+ resample,
169
+ )
170
+ src = normalize_image(src, image_mean, image_std)
171
+
172
+ # Now we have to split the image into crops, and track what patches came from
173
+ # where in `patch_idx_arr`
174
+ n_crops = tiling[0] * tiling[1]
175
+ crop_arr = np.zeros([n_crops, crop_size, crop_size, 3], dtype=src.dtype)
176
+ patch_idx_arr = np.zeros([n_crops, crop_patch_h, crop_patch_w], dtype=np.int32)
177
+ on_crop = 0
178
+ for i in range(tiling[0]):
179
+ # Slide over `src` by `crop_window_size` steps, but extract crops of size `crops_size`
180
+ # which results in overlapping crop windows
181
+ y0 = i*crop_window_size
182
+ for j in range(tiling[1]):
183
+ x0 = j*crop_window_size
184
+ crop_arr[on_crop] = src[y0:y0+crop_size, x0:x0+crop_size]
185
+ patch_idx = np.arange(crop_patch_w*crop_patch_h).reshape(crop_patch_h, crop_patch_w)
186
+ patch_idx += on_crop * crop_patch_h * crop_patch_w
187
+
188
+ # Mask out idx that are in the overlap region
189
+ if i != 0:
190
+ patch_idx[:left_margin, :] = -1
191
+ if j != 0:
192
+ patch_idx[:, :left_margin] = -1
193
+ if i != tiling[0]-1:
194
+ patch_idx[-right_margin:, :] = -1
195
+ if j != tiling[1]-1:
196
+ patch_idx[:, -right_margin:] = -1
197
+ patch_idx_arr[on_crop] = patch_idx
198
+ on_crop += 1
199
+
200
+ # `patch_idx_arr` is ordered crop-by-crop, here we transpose `patch_idx_arr`
201
+ # so it is in left-to-right order
202
+ patch_idx_arr = np.reshape(
203
+ patch_idx_arr,
204
+ [tiling[0], tiling[1], crop_patch_h, crop_patch_w]
205
+ )
206
+ patch_idx_arr = np.transpose(patch_idx_arr, [0, 2, 1, 3])
207
+ patch_idx_arr = np.reshape(patch_idx_arr, [-1])
208
+
209
+ # Now get the parts not in the overlap region, so it should map each patch in `src`
210
+ # to the correct patch it should come from in `crop_arr`
211
+ patch_idx_arr = patch_idx_arr[patch_idx_arr >= 0].reshape(
212
+ src.shape[0]//image_patch_size,
213
+ src.shape[1]//image_patch_size,
214
+ )
215
+ return crop_arr, patch_idx_arr
216
+
217
+
218
+ def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
219
+ """Reshape images of [n_images, h, w, 3] -> [n_images, n_patches, pixels_per_patch]"""
220
+ if len(array.shape) == 3:
221
+ n_crops, h, w = array.shape
222
+ h_patches = h//patch_size
223
+ w_patches = w//patch_size
224
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size])
225
+ array = np.transpose(array, [0, 1, 3, 2, 4])
226
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size])
227
+ return array
228
+ else:
229
+ n_crops, h, w, c = array.shape
230
+ h_patches = h//patch_size
231
+ w_patches = w//patch_size
232
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size, c])
233
+ array = np.transpose(array, [0, 1, 3, 2, 4, 5])
234
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size*c])
235
+ return array
236
+
237
+
238
+ def arange_for_pooling(
239
+ idx_arr: np.ndarray,
240
+ pool_h: int,
241
+ pool_w: int,
242
+ ) -> np.ndarray:
243
+ h_pad = pool_h * ((idx_arr.shape[0] + pool_h - 1) // pool_h) - idx_arr.shape[0]
244
+ w_pad = pool_w * ((idx_arr.shape[1] + pool_w - 1) // pool_w) - idx_arr.shape[1]
245
+ idx_arr = np.pad(idx_arr, [[h_pad//2, (h_pad+1)//2], [w_pad//2, (w_pad+1)//2]],
246
+ mode='constant',constant_values=-1)
247
+ return einops.rearrange(
248
+ idx_arr, "(h dh) (w dw) -> h w (dh dw)", dh=pool_h, dw=pool_w)
249
+
250
+
251
+ def image_to_patches_and_grids(
252
+ image: np.ndarray,
253
+ max_crops: int,
254
+ overlap_margins: list[int],
255
+ base_image_input_size: list[int],
256
+ resample: PILImageResampling,
257
+ image_mean: list[float],
258
+ image_std: list[float],
259
+ image_patch_size: int,
260
+ image_pooling_w: int,
261
+ image_pooling_h: int,
262
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
263
+ """
264
+ :return image_grids, the shape of each (low-res, high-res) image after pooling
265
+ :return crops, the image crops to process with the ViT
266
+ :return pooled_patch_idx, for each patch_id token in `image_tokens`, the indices of the
267
+ patches in `crops` to pool for that token, masked with -1
268
+ """
269
+ if isinstance(base_image_input_size, int):
270
+ base_image_input_size = (base_image_input_size, base_image_input_size)
271
+
272
+ base_image_input_d = image_patch_size
273
+ pooling_w = image_pooling_w
274
+ pooling_h = image_pooling_h
275
+ crop_patch_w = base_image_input_size[1] // base_image_input_d
276
+ crop_patch_h = base_image_input_size[0] // base_image_input_d
277
+
278
+ crop_arr, patch_idx_arr = build_overlapping_crops(
279
+ image,
280
+ max_crops,
281
+ overlap_margins,
282
+ base_image_input_size,
283
+ resample,
284
+ image_mean,
285
+ image_std,
286
+ image_patch_size,
287
+ )
288
+ pooling_idx = arange_for_pooling(patch_idx_arr, pooling_h, pooling_w)
289
+ h, w = pooling_idx.shape[:2]
290
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
291
+
292
+ # Finally do the same for the global image
293
+ resized, resize_idx = build_resized_image(
294
+ image,
295
+ base_image_input_size,
296
+ resample,
297
+ image_mean,
298
+ image_std,
299
+ image_patch_size,
300
+ )
301
+ crop_arr = np.concatenate([resized, crop_arr], 0)
302
+
303
+ resize_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
304
+ resized_h, resized_w = resize_idx.shape[:2]
305
+ resize_idx = resize_idx.reshape([-1, pooling_h*pooling_w])
306
+
307
+ # Global image goes first, so the order of patches in previous crops gets increased
308
+ pooling_idx = np.where(
309
+ pooling_idx >= 0,
310
+ pooling_idx + crop_patch_h*crop_patch_w,
311
+ -1
312
+ )
313
+ pooling_idx = np.concatenate([resize_idx, pooling_idx])
314
+ image_grid = [np.array([resized_h, resized_w, h, w])]
315
+
316
+ return (
317
+ np.stack(image_grid, 0),
318
+ batch_pixels_to_patches(crop_arr, image_patch_size),
319
+ pooling_idx
320
+ )
321
+
322
+
323
+ class Molmo2ImagesKwargs(ImagesKwargs, total=False):
324
+ max_crops: Optional[int]
325
+ overlap_margins: Optional[list[int]]
326
+ patch_size: Optional[int]
327
+ pooling_size: Optional[list[int]]
328
+
329
+
330
+ class Molmo2ImageProcessor(BaseImageProcessor):
331
+ r"""
332
+ Constructs a Molmo2 image processor that preprocesses images for the model.
333
+
334
+ Args:
335
+ size (`dict[str, int]`, *optional*, defaults to `{"height": 378, "width": 378}`):
336
+ Size of the image after resizing.
337
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
338
+ Resampling filter to use when resizing the image.
339
+ image_mean (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
340
+ Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
341
+ image_std (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
342
+ Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
343
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
344
+ Whether to convert the image to RGB.
345
+ max_crops (`int`, *optional*, defaults to `8`):
346
+ Maximum number of crops to use per image.
347
+ overlap_margins (`list[int]`, *optional*, defaults to `[4, 4]`):
348
+ Overlap margins to use.
349
+ patch_size (`int`, *optional*, defaults to 14):
350
+ The spatial patch size of the vision encoder.
351
+ pooling_size (`list[int]`, *optional*, defaults to `[2, 2]`):
352
+ The pooling size of the vision adapter.
353
+ """
354
+
355
+ model_input_names = ["pixel_values", "image_token_pooling", "image_grids", "image_num_crops"]
356
+
357
+ def __init__(
358
+ self,
359
+ size: Optional[dict[str, int]] = None,
360
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
361
+ image_mean: Optional[Union[float, list[float]]] = None,
362
+ image_std: Optional[Union[float, list[float]]] = None,
363
+ do_convert_rgb: bool = True,
364
+ max_crops: int = 8,
365
+ overlap_margins: list[int] = [4, 4],
366
+ patch_size: int = 14,
367
+ pooling_size: list[int] = [2, 2],
368
+ **kwargs,
369
+ ) -> None:
370
+ super().__init__(**kwargs)
371
+ size = size if size is not None else {"height": 378, "width": 378}
372
+ size = get_size_dict(size, default_to_square=True)
373
+ self.size = size
374
+
375
+ self.resample = resample
376
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
377
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
378
+ self.do_convert_rgb = do_convert_rgb
379
+
380
+ self.max_crops = max_crops
381
+ self.overlap_margins = overlap_margins
382
+ self.patch_size = patch_size
383
+ self.pooling_size = pooling_size
384
+
385
+ def preprocess(
386
+ self,
387
+ images: ImageInput,
388
+ size: Optional[dict[str, int]] = None,
389
+ resample: Optional[PILImageResampling] = None,
390
+ image_mean: Optional[Union[float, list[float]]] = None,
391
+ image_std: Optional[Union[float, list[float]]] = None,
392
+ do_convert_rgb: Optional[bool] = None,
393
+ max_crops: Optional[int] = None,
394
+ overlap_margins: Optional[list[int]] = None,
395
+ patch_size: Optional[int] = None,
396
+ pooling_size: Optional[list[int]] = None,
397
+ return_tensors: Optional[Union[str, TensorType]] = None,
398
+ **kwargs,
399
+ ) -> BatchFeature:
400
+ """
401
+ Args:
402
+ images (`ImageInput`):
403
+ Image to preprocess.
404
+ size (`dict[str, int]`, *optional*, defaults to `self.size`):
405
+ Size of the image after resizing.
406
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
407
+ Resampling filter to use when resizing the image. This can be one of the enum `PILImageResampling`. Only
408
+ has an effect if `do_resize` is set to `True`.
409
+ image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
410
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
411
+ image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
412
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
413
+ `True`.
414
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
415
+ Whether to convert the image to RGB.
416
+ max_crops (`int`, *optional*, defaults to `self.max_crops`):
417
+ Maximum number of crops to use per image.
418
+ overlap_margins (`list[int]`, *optional*, defaults to `self.overlap_margins`):
419
+ Overlap margins to use.
420
+ patch_size (`int`, *optional*, defaults to `self.patch_size`):
421
+ The spatial patch size of the vision encoder.
422
+ pooling_size (`list[int]`, *optional*, defaults to `self.pooling_size`):
423
+ The pooling size of the vision adapter.
424
+ return_tensors (`str` or `TensorType`, *optional*):
425
+ The type of tensors to return. Can be one of:
426
+ - Unset: Return a list of `np.ndarray`.
427
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
428
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
429
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
430
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
431
+
432
+ Returns:
433
+ A `BatchFeature` containing the following keys:
434
+ - `pixel_values`: The preprocessed images.
435
+ - `image_token_pooling`: The indices of the patches in `crops` to pool for each token in `image_tokens`.
436
+ - `image_grids`: The image grids.
437
+ - `image_num_crops`: The number of crops for each image.
438
+ """
439
+ if size is not None:
440
+ if "height" not in size or "width" not in size:
441
+ raise ValueError("size must contain 'height' and 'width' keys.")
442
+ else:
443
+ size = {**self.size}
444
+
445
+ base_image_input_size = [size["height"], size["width"]]
446
+
447
+ resample = resample or self.resample
448
+ image_mean = image_mean or self.image_mean
449
+ image_std = image_std or self.image_std
450
+ do_convert_rgb = do_convert_rgb or self.do_convert_rgb
451
+
452
+ max_crops = max_crops or self.max_crops
453
+ overlap_margins = overlap_margins or self.overlap_margins
454
+ patch_size = patch_size or self.patch_size
455
+ pooling_size = pooling_size or self.pooling_size
456
+
457
+ image_pooling_h, image_pooling_w = pooling_size
458
+
459
+ if images is not None:
460
+ images = self.fetch_images(images)
461
+ images = make_flat_list_of_images(images)
462
+
463
+ if images is not None and not valid_images(images):
464
+ raise ValueError(
465
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
466
+ "torch.Tensor, tf.Tensor or jax.ndarray."
467
+ )
468
+
469
+ if do_convert_rgb:
470
+ images = [convert_to_rgb(image) for image in images]
471
+
472
+ # All transformations expect numpy arrays.
473
+ images = [to_numpy_array(image) for image in images]
474
+
475
+ data = {}
476
+ if images is not None:
477
+ batch_grids = []
478
+ batch_crops = []
479
+ batch_pooled_patches_idx = []
480
+ batch_num_crops = []
481
+
482
+ for image in images:
483
+ image_grid, crops, pooled_idx = image_to_patches_and_grids(
484
+ image,
485
+ max_crops,
486
+ overlap_margins,
487
+ base_image_input_size,
488
+ resample,
489
+ image_mean,
490
+ image_std,
491
+ patch_size,
492
+ image_pooling_w,
493
+ image_pooling_h,
494
+ )
495
+ batch_grids.append(image_grid)
496
+ batch_crops.append(crops)
497
+ batch_pooled_patches_idx.append(pooled_idx)
498
+ batch_num_crops.append(crops.shape[0])
499
+
500
+ pixel_values = np.concatenate(batch_crops, 0)
501
+ image_token_pooling = np.concatenate(batch_pooled_patches_idx, 0)
502
+ image_grids = np.concatenate(batch_grids, 0)
503
+ image_num_crops = np.array(batch_num_crops)
504
+
505
+ data.update(
506
+ pixel_values=pixel_values,
507
+ image_token_pooling=image_token_pooling,
508
+ image_grids=image_grids,
509
+ image_num_crops=image_num_crops,
510
+ )
511
+
512
+ return BatchFeature(data, tensor_type=return_tensors)
513
+
514
+
515
+ Molmo2ImageProcessor.register_for_auto_class()
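A small sketch of the tiling selection used above, with only NumPy required; the concrete image size is an illustrative assumption:

from image_processing_molmo2 import select_tiling  # assumes a local checkout on sys.path

# Illustrative numbers: a 600x800 image, 378px base crops, 14px patches, [4, 4] overlap margins.
total_margin_pixels = 14 * (4 + 4)        # 112 px of overlap removed per dimension
crop_window_size = (378 // 14 - 8) * 14   # 266 px of usable window per crop
tiling = select_tiling(600 - total_margin_pixels, 800 - total_margin_pixels,
                       crop_window_size, max_num_crops=8)
print(tiling)                             # [2 3]: 2 crop rows by 3 crop columns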
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e36bb1dd408569baa883ebdbff142d6d71ad9b2bcdb335c2d7c103ae8184aad
3
+ size 4974567112
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a148abecde4269be5b71362d1e2b8d8b7f3b59a8761565d3ca8f1472bf1d463b
3
+ size 4630720272
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d4570c4a93212a62649fc205a4c1b894b58d69b32419686547be0f172a056c8
3
+ size 4630720296
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c572605ad05b34b25d9ecd0f0af3bc7af9f59f2201c2425e1417841a0b82a0c
3
+ size 4630720320
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:844a285fc8c07677c48cd2066d2e6ed753eaa74bf2934dc905fa5e2f0ac1c0cf
3
+ size 4630720320
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7537a5af433d4ce6373b065b96d8504b530875ef1b9becb3c558066f45c35499
3
+ size 4630720320
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe4373ca73e9951d3f11a38a0344cc7772d7ead0152b43584779f4f728306f11
3
+ size 4029422080
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c2b1c96b4e41af8c8729e4d97e0f9d5039bec6f2fa39808bc7cfcb4b0ca55e0
3
+ size 2489319552
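The eight shards above are resolved through model.safetensors.index.json, added below; its metadata (34,646,812,480 bytes for 8,661,703,120 parameters) works out to exactly 4 bytes per parameter, consistent with the float32 dtype in config.json. A minimal sketch of locating a tensor via the index, assuming a local checkout:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)
shard = index["weight_map"]["lm_head.weight"]   # "model-00008-of-00008.safetensors" per the index
with safe_open(shard, framework="pt") as f:
    print(f.get_tensor("lm_head.weight").shape)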
model.safetensors.index.json ADDED
@@ -0,0 +1,714 @@
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 8661703120,
4
+ "total_size": 34646812480
5
+ },
6
+ "weight_map": {
7
+ "lm_head.weight": "model-00008-of-00008.safetensors",
8
+ "model.transformer.blocks.0.attn_norm.weight": "model-00001-of-00008.safetensors",
9
+ "model.transformer.blocks.0.ff_norm.weight": "model-00001-of-00008.safetensors",
10
+ "model.transformer.blocks.0.mlp.ff_out.weight": "model-00001-of-00008.safetensors",
11
+ "model.transformer.blocks.0.mlp.ff_proj.weight": "model-00001-of-00008.safetensors",
12
+ "model.transformer.blocks.0.self_attn.att_proj.weight": "model-00001-of-00008.safetensors",
13
+ "model.transformer.blocks.0.self_attn.attn_out.weight": "model-00001-of-00008.safetensors",
14
+ "model.transformer.blocks.0.self_attn.k_norm.weight": "model-00001-of-00008.safetensors",
15
+ "model.transformer.blocks.0.self_attn.q_norm.weight": "model-00001-of-00008.safetensors",
16
+ "model.transformer.blocks.1.attn_norm.weight": "model-00001-of-00008.safetensors",
17
+ "model.transformer.blocks.1.ff_norm.weight": "model-00001-of-00008.safetensors",
18
+ "model.transformer.blocks.1.mlp.ff_out.weight": "model-00001-of-00008.safetensors",
19
+ "model.transformer.blocks.1.mlp.ff_proj.weight": "model-00001-of-00008.safetensors",
20
+ "model.transformer.blocks.1.self_attn.att_proj.weight": "model-00001-of-00008.safetensors",
21
+ "model.transformer.blocks.1.self_attn.attn_out.weight": "model-00001-of-00008.safetensors",
22
+ "model.transformer.blocks.1.self_attn.k_norm.weight": "model-00001-of-00008.safetensors",
23
+ "model.transformer.blocks.1.self_attn.q_norm.weight": "model-00001-of-00008.safetensors",
24
+ "model.transformer.blocks.10.attn_norm.weight": "model-00003-of-00008.safetensors",
25
+ "model.transformer.blocks.10.ff_norm.weight": "model-00003-of-00008.safetensors",
26
+ "model.transformer.blocks.10.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
27
+ "model.transformer.blocks.10.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
28
+ "model.transformer.blocks.10.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
29
+ "model.transformer.blocks.10.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
30
+ "model.transformer.blocks.10.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
31
+ "model.transformer.blocks.10.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
32
+ "model.transformer.blocks.11.attn_norm.weight": "model-00003-of-00008.safetensors",
33
+ "model.transformer.blocks.11.ff_norm.weight": "model-00003-of-00008.safetensors",
34
+ "model.transformer.blocks.11.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
35
+ "model.transformer.blocks.11.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
36
+ "model.transformer.blocks.11.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
37
+ "model.transformer.blocks.11.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
38
+ "model.transformer.blocks.11.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
39
+ "model.transformer.blocks.11.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
40
+ "model.transformer.blocks.12.attn_norm.weight": "model-00003-of-00008.safetensors",
41
+ "model.transformer.blocks.12.ff_norm.weight": "model-00003-of-00008.safetensors",
42
+ "model.transformer.blocks.12.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
43
+ "model.transformer.blocks.12.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
44
+ "model.transformer.blocks.12.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
45
+ "model.transformer.blocks.12.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
46
+ "model.transformer.blocks.12.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
47
+ "model.transformer.blocks.12.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
48
+ "model.transformer.blocks.13.attn_norm.weight": "model-00003-of-00008.safetensors",
49
+ "model.transformer.blocks.13.ff_norm.weight": "model-00003-of-00008.safetensors",
50
+ "model.transformer.blocks.13.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
51
+ "model.transformer.blocks.13.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
52
+ "model.transformer.blocks.13.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
53
+ "model.transformer.blocks.13.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
54
+ "model.transformer.blocks.13.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
55
+ "model.transformer.blocks.13.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
56
+ "model.transformer.blocks.14.attn_norm.weight": "model-00003-of-00008.safetensors",
57
+ "model.transformer.blocks.14.ff_norm.weight": "model-00003-of-00008.safetensors",
58
+ "model.transformer.blocks.14.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
59
+ "model.transformer.blocks.14.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
60
+ "model.transformer.blocks.14.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
61
+ "model.transformer.blocks.14.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
62
+ "model.transformer.blocks.14.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
63
+ "model.transformer.blocks.14.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
64
+ "model.transformer.blocks.15.attn_norm.weight": "model-00003-of-00008.safetensors",
65
+ "model.transformer.blocks.15.ff_norm.weight": "model-00004-of-00008.safetensors",
66
+ "model.transformer.blocks.15.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
67
+ "model.transformer.blocks.15.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
68
+ "model.transformer.blocks.15.self_attn.att_proj.weight": "model-00003-of-00008.safetensors",
69
+ "model.transformer.blocks.15.self_attn.attn_out.weight": "model-00003-of-00008.safetensors",
70
+ "model.transformer.blocks.15.self_attn.k_norm.weight": "model-00003-of-00008.safetensors",
71
+ "model.transformer.blocks.15.self_attn.q_norm.weight": "model-00003-of-00008.safetensors",
72
+ "model.transformer.blocks.16.attn_norm.weight": "model-00004-of-00008.safetensors",
73
+ "model.transformer.blocks.16.ff_norm.weight": "model-00004-of-00008.safetensors",
74
+ "model.transformer.blocks.16.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
75
+ "model.transformer.blocks.16.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
76
+ "model.transformer.blocks.16.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
77
+ "model.transformer.blocks.16.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
78
+ "model.transformer.blocks.16.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
79
+ "model.transformer.blocks.16.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
80
+ "model.transformer.blocks.17.attn_norm.weight": "model-00004-of-00008.safetensors",
81
+ "model.transformer.blocks.17.ff_norm.weight": "model-00004-of-00008.safetensors",
82
+ "model.transformer.blocks.17.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
83
+ "model.transformer.blocks.17.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
84
+ "model.transformer.blocks.17.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
85
+ "model.transformer.blocks.17.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
86
+ "model.transformer.blocks.17.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
87
+ "model.transformer.blocks.17.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
88
+ "model.transformer.blocks.18.attn_norm.weight": "model-00004-of-00008.safetensors",
89
+ "model.transformer.blocks.18.ff_norm.weight": "model-00004-of-00008.safetensors",
90
+ "model.transformer.blocks.18.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
91
+ "model.transformer.blocks.18.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
92
+ "model.transformer.blocks.18.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
93
+ "model.transformer.blocks.18.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
94
+ "model.transformer.blocks.18.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
95
+ "model.transformer.blocks.18.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
96
+ "model.transformer.blocks.19.attn_norm.weight": "model-00004-of-00008.safetensors",
97
+ "model.transformer.blocks.19.ff_norm.weight": "model-00004-of-00008.safetensors",
98
+ "model.transformer.blocks.19.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
99
+ "model.transformer.blocks.19.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
100
+ "model.transformer.blocks.19.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
101
+ "model.transformer.blocks.19.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
102
+ "model.transformer.blocks.19.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
103
+ "model.transformer.blocks.19.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
104
+ "model.transformer.blocks.2.attn_norm.weight": "model-00001-of-00008.safetensors",
105
+ "model.transformer.blocks.2.ff_norm.weight": "model-00001-of-00008.safetensors",
106
+ "model.transformer.blocks.2.mlp.ff_out.weight": "model-00001-of-00008.safetensors",
107
+ "model.transformer.blocks.2.mlp.ff_proj.weight": "model-00001-of-00008.safetensors",
108
+ "model.transformer.blocks.2.self_attn.att_proj.weight": "model-00001-of-00008.safetensors",
109
+ "model.transformer.blocks.2.self_attn.attn_out.weight": "model-00001-of-00008.safetensors",
110
+ "model.transformer.blocks.2.self_attn.k_norm.weight": "model-00001-of-00008.safetensors",
111
+ "model.transformer.blocks.2.self_attn.q_norm.weight": "model-00001-of-00008.safetensors",
112
+ "model.transformer.blocks.20.attn_norm.weight": "model-00004-of-00008.safetensors",
113
+ "model.transformer.blocks.20.ff_norm.weight": "model-00004-of-00008.safetensors",
114
+ "model.transformer.blocks.20.mlp.ff_out.weight": "model-00004-of-00008.safetensors",
115
+ "model.transformer.blocks.20.mlp.ff_proj.weight": "model-00004-of-00008.safetensors",
116
+ "model.transformer.blocks.20.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
117
+ "model.transformer.blocks.20.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
118
+ "model.transformer.blocks.20.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
119
+ "model.transformer.blocks.20.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
120
+ "model.transformer.blocks.21.attn_norm.weight": "model-00004-of-00008.safetensors",
121
+ "model.transformer.blocks.21.ff_norm.weight": "model-00005-of-00008.safetensors",
122
+ "model.transformer.blocks.21.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
123
+ "model.transformer.blocks.21.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
124
+ "model.transformer.blocks.21.self_attn.att_proj.weight": "model-00004-of-00008.safetensors",
125
+ "model.transformer.blocks.21.self_attn.attn_out.weight": "model-00004-of-00008.safetensors",
126
+ "model.transformer.blocks.21.self_attn.k_norm.weight": "model-00004-of-00008.safetensors",
127
+ "model.transformer.blocks.21.self_attn.q_norm.weight": "model-00004-of-00008.safetensors",
128
+ "model.transformer.blocks.22.attn_norm.weight": "model-00005-of-00008.safetensors",
129
+ "model.transformer.blocks.22.ff_norm.weight": "model-00005-of-00008.safetensors",
130
+ "model.transformer.blocks.22.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
131
+ "model.transformer.blocks.22.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
132
+ "model.transformer.blocks.22.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
133
+ "model.transformer.blocks.22.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
134
+ "model.transformer.blocks.22.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
135
+ "model.transformer.blocks.22.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
136
+ "model.transformer.blocks.23.attn_norm.weight": "model-00005-of-00008.safetensors",
137
+ "model.transformer.blocks.23.ff_norm.weight": "model-00005-of-00008.safetensors",
138
+ "model.transformer.blocks.23.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
139
+ "model.transformer.blocks.23.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
140
+ "model.transformer.blocks.23.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
141
+ "model.transformer.blocks.23.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
142
+ "model.transformer.blocks.23.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
143
+ "model.transformer.blocks.23.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
144
+ "model.transformer.blocks.24.attn_norm.weight": "model-00005-of-00008.safetensors",
145
+ "model.transformer.blocks.24.ff_norm.weight": "model-00005-of-00008.safetensors",
146
+ "model.transformer.blocks.24.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
147
+ "model.transformer.blocks.24.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
148
+ "model.transformer.blocks.24.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
149
+ "model.transformer.blocks.24.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
150
+ "model.transformer.blocks.24.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
151
+ "model.transformer.blocks.24.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
152
+ "model.transformer.blocks.25.attn_norm.weight": "model-00005-of-00008.safetensors",
153
+ "model.transformer.blocks.25.ff_norm.weight": "model-00005-of-00008.safetensors",
154
+ "model.transformer.blocks.25.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
155
+ "model.transformer.blocks.25.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
156
+ "model.transformer.blocks.25.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
157
+ "model.transformer.blocks.25.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
158
+ "model.transformer.blocks.25.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
159
+ "model.transformer.blocks.25.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
160
+ "model.transformer.blocks.26.attn_norm.weight": "model-00005-of-00008.safetensors",
161
+ "model.transformer.blocks.26.ff_norm.weight": "model-00005-of-00008.safetensors",
162
+ "model.transformer.blocks.26.mlp.ff_out.weight": "model-00005-of-00008.safetensors",
163
+ "model.transformer.blocks.26.mlp.ff_proj.weight": "model-00005-of-00008.safetensors",
164
+ "model.transformer.blocks.26.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
165
+ "model.transformer.blocks.26.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
166
+ "model.transformer.blocks.26.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
167
+ "model.transformer.blocks.26.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
168
+ "model.transformer.blocks.27.attn_norm.weight": "model-00005-of-00008.safetensors",
169
+ "model.transformer.blocks.27.ff_norm.weight": "model-00006-of-00008.safetensors",
170
+ "model.transformer.blocks.27.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
171
+ "model.transformer.blocks.27.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
172
+ "model.transformer.blocks.27.self_attn.att_proj.weight": "model-00005-of-00008.safetensors",
173
+ "model.transformer.blocks.27.self_attn.attn_out.weight": "model-00005-of-00008.safetensors",
174
+ "model.transformer.blocks.27.self_attn.k_norm.weight": "model-00005-of-00008.safetensors",
175
+ "model.transformer.blocks.27.self_attn.q_norm.weight": "model-00005-of-00008.safetensors",
176
+ "model.transformer.blocks.28.attn_norm.weight": "model-00006-of-00008.safetensors",
177
+ "model.transformer.blocks.28.ff_norm.weight": "model-00006-of-00008.safetensors",
178
+ "model.transformer.blocks.28.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
179
+ "model.transformer.blocks.28.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
180
+ "model.transformer.blocks.28.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
181
+ "model.transformer.blocks.28.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
182
+ "model.transformer.blocks.28.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
183
+ "model.transformer.blocks.28.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
184
+ "model.transformer.blocks.29.attn_norm.weight": "model-00006-of-00008.safetensors",
185
+ "model.transformer.blocks.29.ff_norm.weight": "model-00006-of-00008.safetensors",
186
+ "model.transformer.blocks.29.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
187
+ "model.transformer.blocks.29.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
188
+ "model.transformer.blocks.29.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
189
+ "model.transformer.blocks.29.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
190
+ "model.transformer.blocks.29.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
191
+ "model.transformer.blocks.29.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
192
+ "model.transformer.blocks.3.attn_norm.weight": "model-00001-of-00008.safetensors",
193
+ "model.transformer.blocks.3.ff_norm.weight": "model-00002-of-00008.safetensors",
194
+ "model.transformer.blocks.3.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
195
+ "model.transformer.blocks.3.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
196
+ "model.transformer.blocks.3.self_attn.att_proj.weight": "model-00001-of-00008.safetensors",
197
+ "model.transformer.blocks.3.self_attn.attn_out.weight": "model-00001-of-00008.safetensors",
198
+ "model.transformer.blocks.3.self_attn.k_norm.weight": "model-00001-of-00008.safetensors",
199
+ "model.transformer.blocks.3.self_attn.q_norm.weight": "model-00001-of-00008.safetensors",
200
+ "model.transformer.blocks.30.attn_norm.weight": "model-00006-of-00008.safetensors",
201
+ "model.transformer.blocks.30.ff_norm.weight": "model-00006-of-00008.safetensors",
202
+ "model.transformer.blocks.30.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
203
+ "model.transformer.blocks.30.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
204
+ "model.transformer.blocks.30.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
205
+ "model.transformer.blocks.30.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
206
+ "model.transformer.blocks.30.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
207
+ "model.transformer.blocks.30.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
208
+ "model.transformer.blocks.31.attn_norm.weight": "model-00006-of-00008.safetensors",
209
+ "model.transformer.blocks.31.ff_norm.weight": "model-00006-of-00008.safetensors",
210
+ "model.transformer.blocks.31.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
211
+ "model.transformer.blocks.31.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
212
+ "model.transformer.blocks.31.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
213
+ "model.transformer.blocks.31.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
214
+ "model.transformer.blocks.31.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
215
+ "model.transformer.blocks.31.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
216
+ "model.transformer.blocks.32.attn_norm.weight": "model-00006-of-00008.safetensors",
217
+ "model.transformer.blocks.32.ff_norm.weight": "model-00006-of-00008.safetensors",
218
+ "model.transformer.blocks.32.mlp.ff_out.weight": "model-00006-of-00008.safetensors",
219
+ "model.transformer.blocks.32.mlp.ff_proj.weight": "model-00006-of-00008.safetensors",
220
+ "model.transformer.blocks.32.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
221
+ "model.transformer.blocks.32.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
222
+ "model.transformer.blocks.32.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
223
+ "model.transformer.blocks.32.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
224
+ "model.transformer.blocks.33.attn_norm.weight": "model-00006-of-00008.safetensors",
225
+ "model.transformer.blocks.33.ff_norm.weight": "model-00007-of-00008.safetensors",
226
+ "model.transformer.blocks.33.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
227
+ "model.transformer.blocks.33.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
228
+ "model.transformer.blocks.33.self_attn.att_proj.weight": "model-00006-of-00008.safetensors",
229
+ "model.transformer.blocks.33.self_attn.attn_out.weight": "model-00006-of-00008.safetensors",
230
+ "model.transformer.blocks.33.self_attn.k_norm.weight": "model-00006-of-00008.safetensors",
231
+ "model.transformer.blocks.33.self_attn.q_norm.weight": "model-00006-of-00008.safetensors",
232
+ "model.transformer.blocks.34.attn_norm.weight": "model-00007-of-00008.safetensors",
233
+ "model.transformer.blocks.34.ff_norm.weight": "model-00007-of-00008.safetensors",
234
+ "model.transformer.blocks.34.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
235
+ "model.transformer.blocks.34.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
236
+ "model.transformer.blocks.34.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
237
+ "model.transformer.blocks.34.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
238
+ "model.transformer.blocks.34.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
239
+ "model.transformer.blocks.34.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
240
+ "model.transformer.blocks.35.attn_norm.weight": "model-00007-of-00008.safetensors",
241
+ "model.transformer.blocks.35.ff_norm.weight": "model-00007-of-00008.safetensors",
242
+ "model.transformer.blocks.35.mlp.ff_out.weight": "model-00007-of-00008.safetensors",
243
+ "model.transformer.blocks.35.mlp.ff_proj.weight": "model-00007-of-00008.safetensors",
244
+ "model.transformer.blocks.35.self_attn.att_proj.weight": "model-00007-of-00008.safetensors",
245
+ "model.transformer.blocks.35.self_attn.attn_out.weight": "model-00007-of-00008.safetensors",
246
+ "model.transformer.blocks.35.self_attn.k_norm.weight": "model-00007-of-00008.safetensors",
247
+ "model.transformer.blocks.35.self_attn.q_norm.weight": "model-00007-of-00008.safetensors",
248
+ "model.transformer.blocks.4.attn_norm.weight": "model-00002-of-00008.safetensors",
249
+ "model.transformer.blocks.4.ff_norm.weight": "model-00002-of-00008.safetensors",
250
+ "model.transformer.blocks.4.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
251
+ "model.transformer.blocks.4.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
252
+ "model.transformer.blocks.4.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
253
+ "model.transformer.blocks.4.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
254
+ "model.transformer.blocks.4.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
255
+ "model.transformer.blocks.4.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
256
+ "model.transformer.blocks.5.attn_norm.weight": "model-00002-of-00008.safetensors",
257
+ "model.transformer.blocks.5.ff_norm.weight": "model-00002-of-00008.safetensors",
258
+ "model.transformer.blocks.5.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
259
+ "model.transformer.blocks.5.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
260
+ "model.transformer.blocks.5.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
261
+ "model.transformer.blocks.5.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
262
+ "model.transformer.blocks.5.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
263
+ "model.transformer.blocks.5.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
264
+ "model.transformer.blocks.6.attn_norm.weight": "model-00002-of-00008.safetensors",
265
+ "model.transformer.blocks.6.ff_norm.weight": "model-00002-of-00008.safetensors",
266
+ "model.transformer.blocks.6.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
267
+ "model.transformer.blocks.6.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
268
+ "model.transformer.blocks.6.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
269
+ "model.transformer.blocks.6.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
270
+ "model.transformer.blocks.6.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
271
+ "model.transformer.blocks.6.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
272
+ "model.transformer.blocks.7.attn_norm.weight": "model-00002-of-00008.safetensors",
273
+ "model.transformer.blocks.7.ff_norm.weight": "model-00002-of-00008.safetensors",
274
+ "model.transformer.blocks.7.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
275
+ "model.transformer.blocks.7.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
276
+ "model.transformer.blocks.7.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
277
+ "model.transformer.blocks.7.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
278
+ "model.transformer.blocks.7.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
279
+ "model.transformer.blocks.7.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
280
+ "model.transformer.blocks.8.attn_norm.weight": "model-00002-of-00008.safetensors",
281
+ "model.transformer.blocks.8.ff_norm.weight": "model-00002-of-00008.safetensors",
282
+ "model.transformer.blocks.8.mlp.ff_out.weight": "model-00002-of-00008.safetensors",
283
+ "model.transformer.blocks.8.mlp.ff_proj.weight": "model-00002-of-00008.safetensors",
284
+ "model.transformer.blocks.8.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
285
+ "model.transformer.blocks.8.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
286
+ "model.transformer.blocks.8.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
287
+ "model.transformer.blocks.8.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
288
+ "model.transformer.blocks.9.attn_norm.weight": "model-00002-of-00008.safetensors",
289
+ "model.transformer.blocks.9.ff_norm.weight": "model-00003-of-00008.safetensors",
290
+ "model.transformer.blocks.9.mlp.ff_out.weight": "model-00003-of-00008.safetensors",
291
+ "model.transformer.blocks.9.mlp.ff_proj.weight": "model-00003-of-00008.safetensors",
292
+ "model.transformer.blocks.9.self_attn.att_proj.weight": "model-00002-of-00008.safetensors",
293
+ "model.transformer.blocks.9.self_attn.attn_out.weight": "model-00002-of-00008.safetensors",
294
+ "model.transformer.blocks.9.self_attn.k_norm.weight": "model-00002-of-00008.safetensors",
295
+ "model.transformer.blocks.9.self_attn.q_norm.weight": "model-00002-of-00008.safetensors",
296
+ "model.transformer.ln_f.weight": "model-00007-of-00008.safetensors",
297
+ "model.transformer.wte.embedding": "model-00001-of-00008.safetensors",
298
+ "model.transformer.wte.new_embedding": "model-00001-of-00008.safetensors",
299
+ "model.vision_backbone.image_pooling_2d.wk.bias": "model-00007-of-00008.safetensors",
300
+ "model.vision_backbone.image_pooling_2d.wk.weight": "model-00007-of-00008.safetensors",
301
+ "model.vision_backbone.image_pooling_2d.wo.bias": "model-00007-of-00008.safetensors",
302
+ "model.vision_backbone.image_pooling_2d.wo.weight": "model-00007-of-00008.safetensors",
303
+ "model.vision_backbone.image_pooling_2d.wq.bias": "model-00007-of-00008.safetensors",
304
+ "model.vision_backbone.image_pooling_2d.wq.weight": "model-00007-of-00008.safetensors",
305
+ "model.vision_backbone.image_pooling_2d.wv.bias": "model-00007-of-00008.safetensors",
306
+ "model.vision_backbone.image_pooling_2d.wv.weight": "model-00007-of-00008.safetensors",
307
+ "model.vision_backbone.image_projector.w1.weight": "model-00007-of-00008.safetensors",
308
+ "model.vision_backbone.image_projector.w2.weight": "model-00007-of-00008.safetensors",
309
+ "model.vision_backbone.image_projector.w3.weight": "model-00007-of-00008.safetensors",
310
+ "model.vision_backbone.image_vit.patch_embedding.bias": "model-00007-of-00008.safetensors",
311
+ "model.vision_backbone.image_vit.patch_embedding.weight": "model-00007-of-00008.safetensors",
312
+ "model.vision_backbone.image_vit.positional_embedding": "model-00007-of-00008.safetensors",
313
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wk.bias": "model-00007-of-00008.safetensors",
314
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wk.weight": "model-00007-of-00008.safetensors",
315
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wo.bias": "model-00007-of-00008.safetensors",
316
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wo.weight": "model-00007-of-00008.safetensors",
317
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wq.bias": "model-00007-of-00008.safetensors",
318
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wq.weight": "model-00007-of-00008.safetensors",
319
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wv.bias": "model-00007-of-00008.safetensors",
320
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wv.weight": "model-00007-of-00008.safetensors",
321
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention_norm.bias": "model-00007-of-00008.safetensors",
322
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention_norm.weight": "model-00007-of-00008.safetensors",
323
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
324
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
325
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
326
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
327
+ "model.vision_backbone.image_vit.transformer.resblocks.0.ffn_norm.bias": "model-00007-of-00008.safetensors",
328
+ "model.vision_backbone.image_vit.transformer.resblocks.0.ffn_norm.weight": "model-00007-of-00008.safetensors",
329
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wk.bias": "model-00007-of-00008.safetensors",
330
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wk.weight": "model-00007-of-00008.safetensors",
331
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wo.bias": "model-00007-of-00008.safetensors",
332
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wo.weight": "model-00007-of-00008.safetensors",
333
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wq.bias": "model-00007-of-00008.safetensors",
334
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wq.weight": "model-00007-of-00008.safetensors",
335
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wv.bias": "model-00007-of-00008.safetensors",
336
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wv.weight": "model-00007-of-00008.safetensors",
337
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention_norm.bias": "model-00007-of-00008.safetensors",
338
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention_norm.weight": "model-00007-of-00008.safetensors",
339
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
340
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
341
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
342
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
343
+ "model.vision_backbone.image_vit.transformer.resblocks.1.ffn_norm.bias": "model-00007-of-00008.safetensors",
344
+ "model.vision_backbone.image_vit.transformer.resblocks.1.ffn_norm.weight": "model-00007-of-00008.safetensors",
345
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wk.bias": "model-00007-of-00008.safetensors",
346
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wk.weight": "model-00007-of-00008.safetensors",
347
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wo.bias": "model-00007-of-00008.safetensors",
348
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wo.weight": "model-00007-of-00008.safetensors",
349
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wq.bias": "model-00007-of-00008.safetensors",
350
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wq.weight": "model-00007-of-00008.safetensors",
351
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wv.bias": "model-00007-of-00008.safetensors",
352
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wv.weight": "model-00007-of-00008.safetensors",
353
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention_norm.bias": "model-00007-of-00008.safetensors",
354
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention_norm.weight": "model-00007-of-00008.safetensors",
355
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
356
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
357
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
358
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
359
+ "model.vision_backbone.image_vit.transformer.resblocks.10.ffn_norm.bias": "model-00007-of-00008.safetensors",
360
+ "model.vision_backbone.image_vit.transformer.resblocks.10.ffn_norm.weight": "model-00007-of-00008.safetensors",
361
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wk.bias": "model-00007-of-00008.safetensors",
362
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wk.weight": "model-00007-of-00008.safetensors",
363
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wo.bias": "model-00007-of-00008.safetensors",
364
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wo.weight": "model-00007-of-00008.safetensors",
365
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wq.bias": "model-00007-of-00008.safetensors",
366
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wq.weight": "model-00007-of-00008.safetensors",
367
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wv.bias": "model-00007-of-00008.safetensors",
368
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wv.weight": "model-00007-of-00008.safetensors",
369
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention_norm.bias": "model-00007-of-00008.safetensors",
370
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention_norm.weight": "model-00007-of-00008.safetensors",
371
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
372
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
373
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
374
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
375
+ "model.vision_backbone.image_vit.transformer.resblocks.11.ffn_norm.bias": "model-00007-of-00008.safetensors",
376
+ "model.vision_backbone.image_vit.transformer.resblocks.11.ffn_norm.weight": "model-00007-of-00008.safetensors",
377
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wk.bias": "model-00007-of-00008.safetensors",
378
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wk.weight": "model-00007-of-00008.safetensors",
379
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wo.bias": "model-00007-of-00008.safetensors",
380
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wo.weight": "model-00007-of-00008.safetensors",
381
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wq.bias": "model-00007-of-00008.safetensors",
382
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wq.weight": "model-00007-of-00008.safetensors",
383
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wv.bias": "model-00007-of-00008.safetensors",
384
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wv.weight": "model-00007-of-00008.safetensors",
385
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention_norm.bias": "model-00007-of-00008.safetensors",
386
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention_norm.weight": "model-00007-of-00008.safetensors",
387
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
388
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
389
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
390
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
391
+ "model.vision_backbone.image_vit.transformer.resblocks.12.ffn_norm.bias": "model-00007-of-00008.safetensors",
392
+ "model.vision_backbone.image_vit.transformer.resblocks.12.ffn_norm.weight": "model-00007-of-00008.safetensors",
393
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wk.bias": "model-00007-of-00008.safetensors",
394
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wk.weight": "model-00007-of-00008.safetensors",
395
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wo.bias": "model-00007-of-00008.safetensors",
396
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wo.weight": "model-00007-of-00008.safetensors",
397
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wq.bias": "model-00007-of-00008.safetensors",
398
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wq.weight": "model-00007-of-00008.safetensors",
399
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wv.bias": "model-00007-of-00008.safetensors",
400
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wv.weight": "model-00007-of-00008.safetensors",
401
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention_norm.bias": "model-00007-of-00008.safetensors",
402
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention_norm.weight": "model-00007-of-00008.safetensors",
403
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
404
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
405
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
406
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
407
+ "model.vision_backbone.image_vit.transformer.resblocks.13.ffn_norm.bias": "model-00007-of-00008.safetensors",
408
+ "model.vision_backbone.image_vit.transformer.resblocks.13.ffn_norm.weight": "model-00007-of-00008.safetensors",
409
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wk.bias": "model-00007-of-00008.safetensors",
410
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wk.weight": "model-00007-of-00008.safetensors",
411
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wo.bias": "model-00007-of-00008.safetensors",
412
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wo.weight": "model-00007-of-00008.safetensors",
413
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wq.bias": "model-00007-of-00008.safetensors",
414
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wq.weight": "model-00007-of-00008.safetensors",
415
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wv.bias": "model-00007-of-00008.safetensors",
416
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wv.weight": "model-00007-of-00008.safetensors",
417
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention_norm.bias": "model-00007-of-00008.safetensors",
418
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention_norm.weight": "model-00007-of-00008.safetensors",
419
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
420
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
421
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
422
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
423
+ "model.vision_backbone.image_vit.transformer.resblocks.14.ffn_norm.bias": "model-00007-of-00008.safetensors",
424
+ "model.vision_backbone.image_vit.transformer.resblocks.14.ffn_norm.weight": "model-00007-of-00008.safetensors",
425
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wk.bias": "model-00007-of-00008.safetensors",
426
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wk.weight": "model-00007-of-00008.safetensors",
427
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wo.bias": "model-00007-of-00008.safetensors",
428
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wo.weight": "model-00007-of-00008.safetensors",
429
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wq.bias": "model-00007-of-00008.safetensors",
430
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wq.weight": "model-00007-of-00008.safetensors",
431
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wv.bias": "model-00007-of-00008.safetensors",
432
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wv.weight": "model-00007-of-00008.safetensors",
433
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention_norm.bias": "model-00007-of-00008.safetensors",
434
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention_norm.weight": "model-00007-of-00008.safetensors",
435
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
436
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
437
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
438
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
439
+ "model.vision_backbone.image_vit.transformer.resblocks.15.ffn_norm.bias": "model-00007-of-00008.safetensors",
440
+ "model.vision_backbone.image_vit.transformer.resblocks.15.ffn_norm.weight": "model-00007-of-00008.safetensors",
441
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wk.bias": "model-00007-of-00008.safetensors",
442
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wk.weight": "model-00007-of-00008.safetensors",
443
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wo.bias": "model-00007-of-00008.safetensors",
444
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wo.weight": "model-00007-of-00008.safetensors",
445
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wq.bias": "model-00007-of-00008.safetensors",
446
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wq.weight": "model-00007-of-00008.safetensors",
447
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wv.bias": "model-00007-of-00008.safetensors",
448
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wv.weight": "model-00007-of-00008.safetensors",
449
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention_norm.bias": "model-00007-of-00008.safetensors",
450
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention_norm.weight": "model-00007-of-00008.safetensors",
451
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
452
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
453
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
454
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
455
+ "model.vision_backbone.image_vit.transformer.resblocks.16.ffn_norm.bias": "model-00007-of-00008.safetensors",
456
+ "model.vision_backbone.image_vit.transformer.resblocks.16.ffn_norm.weight": "model-00007-of-00008.safetensors",
457
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wk.bias": "model-00007-of-00008.safetensors",
458
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wk.weight": "model-00007-of-00008.safetensors",
459
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wo.bias": "model-00007-of-00008.safetensors",
460
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wo.weight": "model-00007-of-00008.safetensors",
461
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wq.bias": "model-00007-of-00008.safetensors",
462
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wq.weight": "model-00007-of-00008.safetensors",
463
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wv.bias": "model-00007-of-00008.safetensors",
464
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wv.weight": "model-00007-of-00008.safetensors",
465
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention_norm.bias": "model-00007-of-00008.safetensors",
466
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention_norm.weight": "model-00007-of-00008.safetensors",
467
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
468
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
469
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
470
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
471
+ "model.vision_backbone.image_vit.transformer.resblocks.17.ffn_norm.bias": "model-00007-of-00008.safetensors",
472
+ "model.vision_backbone.image_vit.transformer.resblocks.17.ffn_norm.weight": "model-00007-of-00008.safetensors",
473
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wk.bias": "model-00007-of-00008.safetensors",
474
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wk.weight": "model-00007-of-00008.safetensors",
475
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wo.bias": "model-00007-of-00008.safetensors",
476
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wo.weight": "model-00007-of-00008.safetensors",
477
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.bias": "model-00007-of-00008.safetensors",
478
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.weight": "model-00007-of-00008.safetensors",
479
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wv.bias": "model-00007-of-00008.safetensors",
480
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wv.weight": "model-00007-of-00008.safetensors",
481
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention_norm.bias": "model-00007-of-00008.safetensors",
482
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention_norm.weight": "model-00007-of-00008.safetensors",
483
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
484
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
485
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
486
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
487
+ "model.vision_backbone.image_vit.transformer.resblocks.18.ffn_norm.bias": "model-00007-of-00008.safetensors",
488
+ "model.vision_backbone.image_vit.transformer.resblocks.18.ffn_norm.weight": "model-00007-of-00008.safetensors",
489
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wk.bias": "model-00007-of-00008.safetensors",
490
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wk.weight": "model-00007-of-00008.safetensors",
491
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wo.bias": "model-00007-of-00008.safetensors",
492
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wo.weight": "model-00007-of-00008.safetensors",
493
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wq.bias": "model-00007-of-00008.safetensors",
494
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wq.weight": "model-00007-of-00008.safetensors",
495
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wv.bias": "model-00007-of-00008.safetensors",
496
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wv.weight": "model-00007-of-00008.safetensors",
497
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention_norm.bias": "model-00007-of-00008.safetensors",
498
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention_norm.weight": "model-00007-of-00008.safetensors",
499
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
500
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
501
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
502
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
503
+ "model.vision_backbone.image_vit.transformer.resblocks.19.ffn_norm.bias": "model-00007-of-00008.safetensors",
504
+ "model.vision_backbone.image_vit.transformer.resblocks.19.ffn_norm.weight": "model-00007-of-00008.safetensors",
505
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wk.bias": "model-00007-of-00008.safetensors",
506
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wk.weight": "model-00007-of-00008.safetensors",
507
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wo.bias": "model-00007-of-00008.safetensors",
508
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wo.weight": "model-00007-of-00008.safetensors",
509
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wq.bias": "model-00007-of-00008.safetensors",
510
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wq.weight": "model-00007-of-00008.safetensors",
511
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wv.bias": "model-00007-of-00008.safetensors",
512
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wv.weight": "model-00007-of-00008.safetensors",
513
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention_norm.bias": "model-00007-of-00008.safetensors",
514
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention_norm.weight": "model-00007-of-00008.safetensors",
515
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
516
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
517
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
518
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
519
+ "model.vision_backbone.image_vit.transformer.resblocks.2.ffn_norm.bias": "model-00007-of-00008.safetensors",
520
+ "model.vision_backbone.image_vit.transformer.resblocks.2.ffn_norm.weight": "model-00007-of-00008.safetensors",
521
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wk.bias": "model-00007-of-00008.safetensors",
522
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wk.weight": "model-00007-of-00008.safetensors",
523
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wo.bias": "model-00007-of-00008.safetensors",
524
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wo.weight": "model-00007-of-00008.safetensors",
525
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wq.bias": "model-00007-of-00008.safetensors",
526
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wq.weight": "model-00007-of-00008.safetensors",
527
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wv.bias": "model-00007-of-00008.safetensors",
528
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wv.weight": "model-00007-of-00008.safetensors",
529
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention_norm.bias": "model-00007-of-00008.safetensors",
530
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention_norm.weight": "model-00007-of-00008.safetensors",
531
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
532
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
533
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
534
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
535
+ "model.vision_backbone.image_vit.transformer.resblocks.20.ffn_norm.bias": "model-00007-of-00008.safetensors",
536
+ "model.vision_backbone.image_vit.transformer.resblocks.20.ffn_norm.weight": "model-00007-of-00008.safetensors",
537
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wk.bias": "model-00007-of-00008.safetensors",
538
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wk.weight": "model-00007-of-00008.safetensors",
539
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wo.bias": "model-00007-of-00008.safetensors",
540
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wo.weight": "model-00007-of-00008.safetensors",
541
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wq.bias": "model-00007-of-00008.safetensors",
542
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wq.weight": "model-00007-of-00008.safetensors",
543
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wv.bias": "model-00007-of-00008.safetensors",
544
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wv.weight": "model-00007-of-00008.safetensors",
545
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention_norm.bias": "model-00007-of-00008.safetensors",
546
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention_norm.weight": "model-00007-of-00008.safetensors",
547
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
548
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
549
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
550
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
551
+ "model.vision_backbone.image_vit.transformer.resblocks.21.ffn_norm.bias": "model-00007-of-00008.safetensors",
552
+ "model.vision_backbone.image_vit.transformer.resblocks.21.ffn_norm.weight": "model-00007-of-00008.safetensors",
553
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wk.bias": "model-00007-of-00008.safetensors",
554
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wk.weight": "model-00007-of-00008.safetensors",
555
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wo.bias": "model-00007-of-00008.safetensors",
556
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wo.weight": "model-00007-of-00008.safetensors",
557
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wq.bias": "model-00007-of-00008.safetensors",
558
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wq.weight": "model-00007-of-00008.safetensors",
559
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wv.bias": "model-00007-of-00008.safetensors",
560
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wv.weight": "model-00007-of-00008.safetensors",
561
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention_norm.bias": "model-00007-of-00008.safetensors",
562
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention_norm.weight": "model-00007-of-00008.safetensors",
563
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
564
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
565
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
566
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
567
+ "model.vision_backbone.image_vit.transformer.resblocks.22.ffn_norm.bias": "model-00007-of-00008.safetensors",
568
+ "model.vision_backbone.image_vit.transformer.resblocks.22.ffn_norm.weight": "model-00007-of-00008.safetensors",
569
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wk.bias": "model-00007-of-00008.safetensors",
570
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wk.weight": "model-00007-of-00008.safetensors",
571
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wo.bias": "model-00007-of-00008.safetensors",
572
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wo.weight": "model-00007-of-00008.safetensors",
573
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wq.bias": "model-00007-of-00008.safetensors",
574
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wq.weight": "model-00007-of-00008.safetensors",
575
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wv.bias": "model-00007-of-00008.safetensors",
576
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wv.weight": "model-00007-of-00008.safetensors",
577
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention_norm.bias": "model-00007-of-00008.safetensors",
578
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention_norm.weight": "model-00007-of-00008.safetensors",
579
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
580
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
581
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
582
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
583
+ "model.vision_backbone.image_vit.transformer.resblocks.23.ffn_norm.bias": "model-00007-of-00008.safetensors",
584
+ "model.vision_backbone.image_vit.transformer.resblocks.23.ffn_norm.weight": "model-00007-of-00008.safetensors",
585
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wk.bias": "model-00007-of-00008.safetensors",
586
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wk.weight": "model-00007-of-00008.safetensors",
587
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wo.bias": "model-00007-of-00008.safetensors",
588
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wo.weight": "model-00007-of-00008.safetensors",
589
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wq.bias": "model-00007-of-00008.safetensors",
590
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wq.weight": "model-00007-of-00008.safetensors",
591
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wv.bias": "model-00007-of-00008.safetensors",
592
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wv.weight": "model-00007-of-00008.safetensors",
593
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention_norm.bias": "model-00007-of-00008.safetensors",
594
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention_norm.weight": "model-00007-of-00008.safetensors",
595
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
596
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
597
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
598
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
599
+ "model.vision_backbone.image_vit.transformer.resblocks.24.ffn_norm.bias": "model-00007-of-00008.safetensors",
600
+ "model.vision_backbone.image_vit.transformer.resblocks.24.ffn_norm.weight": "model-00007-of-00008.safetensors",
601
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wk.bias": "model-00007-of-00008.safetensors",
602
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wk.weight": "model-00007-of-00008.safetensors",
603
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wo.bias": "model-00007-of-00008.safetensors",
604
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wo.weight": "model-00007-of-00008.safetensors",
605
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wq.bias": "model-00007-of-00008.safetensors",
606
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wq.weight": "model-00007-of-00008.safetensors",
607
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wv.bias": "model-00007-of-00008.safetensors",
608
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wv.weight": "model-00007-of-00008.safetensors",
609
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention_norm.bias": "model-00007-of-00008.safetensors",
610
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention_norm.weight": "model-00007-of-00008.safetensors",
611
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
612
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
613
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
614
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
615
+ "model.vision_backbone.image_vit.transformer.resblocks.3.ffn_norm.bias": "model-00007-of-00008.safetensors",
616
+ "model.vision_backbone.image_vit.transformer.resblocks.3.ffn_norm.weight": "model-00007-of-00008.safetensors",
617
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wk.bias": "model-00007-of-00008.safetensors",
618
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wk.weight": "model-00007-of-00008.safetensors",
619
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wo.bias": "model-00007-of-00008.safetensors",
620
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wo.weight": "model-00007-of-00008.safetensors",
621
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wq.bias": "model-00007-of-00008.safetensors",
622
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wq.weight": "model-00007-of-00008.safetensors",
623
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wv.bias": "model-00007-of-00008.safetensors",
624
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wv.weight": "model-00007-of-00008.safetensors",
625
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention_norm.bias": "model-00007-of-00008.safetensors",
626
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention_norm.weight": "model-00007-of-00008.safetensors",
627
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
628
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
629
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
630
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
631
+ "model.vision_backbone.image_vit.transformer.resblocks.4.ffn_norm.bias": "model-00007-of-00008.safetensors",
632
+ "model.vision_backbone.image_vit.transformer.resblocks.4.ffn_norm.weight": "model-00007-of-00008.safetensors",
633
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wk.bias": "model-00007-of-00008.safetensors",
634
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wk.weight": "model-00007-of-00008.safetensors",
635
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wo.bias": "model-00007-of-00008.safetensors",
636
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wo.weight": "model-00007-of-00008.safetensors",
637
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wq.bias": "model-00007-of-00008.safetensors",
638
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wq.weight": "model-00007-of-00008.safetensors",
639
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wv.bias": "model-00007-of-00008.safetensors",
640
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wv.weight": "model-00007-of-00008.safetensors",
641
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention_norm.bias": "model-00007-of-00008.safetensors",
642
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention_norm.weight": "model-00007-of-00008.safetensors",
643
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
644
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
645
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
646
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
647
+ "model.vision_backbone.image_vit.transformer.resblocks.5.ffn_norm.bias": "model-00007-of-00008.safetensors",
648
+ "model.vision_backbone.image_vit.transformer.resblocks.5.ffn_norm.weight": "model-00007-of-00008.safetensors",
649
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wk.bias": "model-00007-of-00008.safetensors",
650
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wk.weight": "model-00007-of-00008.safetensors",
651
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wo.bias": "model-00007-of-00008.safetensors",
652
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wo.weight": "model-00007-of-00008.safetensors",
653
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wq.bias": "model-00007-of-00008.safetensors",
654
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wq.weight": "model-00007-of-00008.safetensors",
655
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wv.bias": "model-00007-of-00008.safetensors",
656
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wv.weight": "model-00007-of-00008.safetensors",
657
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention_norm.bias": "model-00007-of-00008.safetensors",
658
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention_norm.weight": "model-00007-of-00008.safetensors",
659
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
660
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
661
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
662
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
663
+ "model.vision_backbone.image_vit.transformer.resblocks.6.ffn_norm.bias": "model-00007-of-00008.safetensors",
664
+ "model.vision_backbone.image_vit.transformer.resblocks.6.ffn_norm.weight": "model-00007-of-00008.safetensors",
665
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wk.bias": "model-00007-of-00008.safetensors",
666
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wk.weight": "model-00007-of-00008.safetensors",
667
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wo.bias": "model-00007-of-00008.safetensors",
668
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wo.weight": "model-00007-of-00008.safetensors",
669
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wq.bias": "model-00007-of-00008.safetensors",
670
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wq.weight": "model-00007-of-00008.safetensors",
671
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wv.bias": "model-00007-of-00008.safetensors",
672
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wv.weight": "model-00007-of-00008.safetensors",
673
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention_norm.bias": "model-00007-of-00008.safetensors",
674
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention_norm.weight": "model-00007-of-00008.safetensors",
675
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
676
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
677
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
678
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
679
+ "model.vision_backbone.image_vit.transformer.resblocks.7.ffn_norm.bias": "model-00007-of-00008.safetensors",
680
+ "model.vision_backbone.image_vit.transformer.resblocks.7.ffn_norm.weight": "model-00007-of-00008.safetensors",
681
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wk.bias": "model-00007-of-00008.safetensors",
682
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wk.weight": "model-00007-of-00008.safetensors",
683
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wo.bias": "model-00007-of-00008.safetensors",
684
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wo.weight": "model-00007-of-00008.safetensors",
685
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wq.bias": "model-00007-of-00008.safetensors",
686
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wq.weight": "model-00007-of-00008.safetensors",
687
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wv.bias": "model-00007-of-00008.safetensors",
688
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wv.weight": "model-00007-of-00008.safetensors",
689
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention_norm.bias": "model-00007-of-00008.safetensors",
690
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention_norm.weight": "model-00007-of-00008.safetensors",
691
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
692
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
693
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
694
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
695
+ "model.vision_backbone.image_vit.transformer.resblocks.8.ffn_norm.bias": "model-00007-of-00008.safetensors",
696
+ "model.vision_backbone.image_vit.transformer.resblocks.8.ffn_norm.weight": "model-00007-of-00008.safetensors",
697
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wk.bias": "model-00007-of-00008.safetensors",
698
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wk.weight": "model-00007-of-00008.safetensors",
699
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wo.bias": "model-00007-of-00008.safetensors",
700
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wo.weight": "model-00007-of-00008.safetensors",
701
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wq.bias": "model-00007-of-00008.safetensors",
702
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wq.weight": "model-00007-of-00008.safetensors",
703
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wv.bias": "model-00007-of-00008.safetensors",
704
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wv.weight": "model-00007-of-00008.safetensors",
705
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention_norm.bias": "model-00007-of-00008.safetensors",
706
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention_norm.weight": "model-00007-of-00008.safetensors",
707
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w1.bias": "model-00007-of-00008.safetensors",
708
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
709
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w2.bias": "model-00007-of-00008.safetensors",
710
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
711
+ "model.vision_backbone.image_vit.transformer.resblocks.9.ffn_norm.bias": "model-00007-of-00008.safetensors",
712
+ "model.vision_backbone.image_vit.transformer.resblocks.9.ffn_norm.weight": "model-00007-of-00008.safetensors"
713
+ }
714
+ }
modeling_molmo2.py ADDED
@@ -0,0 +1,1764 @@
1
+ import math
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Union, Callable
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+
10
+ from transformers.models.auto import AutoModelForImageTextToText
11
+ from transformers.activations import ACT2FN
12
+ from transformers.configuration_utils import PretrainedConfig
13
+ from transformers.cache_utils import Cache, DynamicCache
14
+ from transformers.generation import GenerationMixin
15
+ from transformers.masking_utils import create_causal_mask, create_masks_for_generate
16
+ from transformers.modeling_flash_attention_utils import (
17
+ _flash_attention_forward,
18
+ FlashAttentionKwargs,
19
+ flash_attn_supports_top_left_mask,
20
+ )
21
+ from transformers.modeling_layers import GradientCheckpointingLayer
22
+ from transformers.modeling_outputs import (
23
+ BaseModelOutputWithPast,
24
+ )
25
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
26
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
27
+ from transformers.processing_utils import Unpack
28
+ from transformers.utils import (
29
+ ModelOutput,
30
+ TransformersKwargs,
31
+ can_return_tuple,
32
+ logging,
33
+ )
34
+
35
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2TextConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ @dataclass
42
+ class Molmo2CausalLMOutputWithPast(ModelOutput):
43
+ """
44
+ Base class for Molmo2 causal language model (or autoregressive) outputs.
45
+
46
+ Args:
47
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
48
+ Language modeling loss (for next-token prediction).
49
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
50
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
51
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
52
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
53
+
54
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
55
+ `past_key_values` input) to speed up sequential decoding.
56
+ image_hidden_states (`torch.FloatTensor`, *optional*):
57
+ A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
58
+ image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
59
+ """
60
+
61
+ loss: Optional[torch.FloatTensor] = None
62
+ logits: Optional[torch.FloatTensor] = None
63
+ past_key_values: Optional[Cache] = None
64
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
65
+ attentions: Optional[tuple[torch.FloatTensor]] = None
66
+ image_hidden_states: Optional[torch.FloatTensor] = None
67
+
68
+
69
+ @dataclass
70
+ class Molmo2ModelOutputWithPast(BaseModelOutputWithPast):
71
+ """
72
+ Base class for Molmo2 outputs, with hidden states and attentions.
73
+
74
+ Args:
75
+ image_hidden_states (`torch.FloatTensor`, *optional*):
76
+ A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
77
+ image_hidden_states of the model produced by the vision backbone
78
+ """
79
+ last_hidden_state: Optional[torch.FloatTensor] = None
80
+ past_key_values: Optional[Cache] = None
81
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
82
+ attentions: Optional[tuple[torch.FloatTensor]] = None
83
+ image_hidden_states: Optional[torch.FloatTensor] = None
84
+
85
+
86
+ class ViTMLP(nn.Module):
87
+ def __init__(self, dim: int, hidden_dim: int, hidden_act: str, device: Union[str, torch.device] = None):
88
+ super().__init__()
89
+ self.w1 = nn.Linear(dim, hidden_dim, bias=True, device=device)
90
+ self.act = ACT2FN[hidden_act]
91
+ self.w2 = nn.Linear(hidden_dim, dim, bias=True, device=device)
92
+
93
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
94
+ return self.w2(self.act(self.w1(x)))
95
+
96
+
97
+ class ViTMultiHeadDotProductAttention(nn.Module):
98
+ def __init__(
99
+ self,
100
+ hidden_size: int,
101
+ num_heads: int,
102
+ num_key_value_heads: int,
103
+ head_dim: int,
104
+ use_bias: bool = True,
105
+ input_dim: Optional[int] = None,
106
+ float32_attention: bool = True,
107
+ attention_dropout: float = 0.0,
108
+ residual_dropout: float = 0.0,
109
+ device: Union[str, torch.device] = None,
110
+ attn_implementation: str = "eager",
111
+ ):
112
+ super().__init__()
113
+
114
+ self.hidden_size = hidden_size
115
+ self.num_heads = num_heads
116
+ self.head_dim = head_dim
117
+ self.num_key_value_heads = num_key_value_heads
118
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
119
+ self.attn_implementation = attn_implementation
120
+ self.is_causal = False
121
+
122
+ input_dim = input_dim or hidden_size
123
+
124
+ self.wq = nn.Linear(
125
+ input_dim,
126
+ self.num_heads * self.head_dim,
127
+ bias=use_bias,
128
+ device=device,
129
+ )
130
+ self.wk = nn.Linear(
131
+ input_dim,
132
+ self.num_key_value_heads * self.head_dim,
133
+ bias=use_bias,
134
+ device=device,
135
+ )
136
+ self.wv = nn.Linear(
137
+ input_dim,
138
+ self.num_key_value_heads * self.head_dim,
139
+ bias=use_bias,
140
+ device=device,
141
+ )
142
+ self.wo = nn.Linear(
143
+ self.num_heads * self.head_dim,
144
+ self.hidden_size,
145
+ )
146
+ self.float32_attention = float32_attention
147
+ self.attention_dropout = attention_dropout
148
+ self.residual_dropout = nn.Dropout(residual_dropout)
149
+
150
+ def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
151
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
152
+
153
+ def _merge_heads(self, hidden_states) -> torch.Tensor:
154
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))
155
+
156
+ def forward(
157
+ self,
158
+ inputs_q: torch.Tensor,
159
+ inputs_kv: Optional[torch.Tensor] = None,
160
+ attn_mask: Optional[torch.Tensor] = None,
161
+ ) -> torch.Tensor:
162
+
163
+ if inputs_kv is not None:
164
+ inputs_k = inputs_kv
165
+ inputs_v = inputs_kv
166
+ else:
167
+ inputs_k = inputs_q
168
+ inputs_v = inputs_q
169
+
170
+ xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)
171
+
172
+ xq = self._split_heads(xq, self.num_heads)
173
+ xk = self._split_heads(xk, self.num_key_value_heads)
174
+ xv = self._split_heads(xv, self.num_key_value_heads)
175
+
176
+ if self.num_heads != self.num_key_value_heads:
177
+ xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
178
+ xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
179
+
180
+ og_dtype = xq.dtype
181
+
182
+ if self.float32_attention:
183
+ xq = xq.to(torch.float)
184
+ xk = xk.to(torch.float)
185
+
186
+ dropout_p = 0.0 if not self.training else self.attention_dropout
187
+
188
+ if self.attn_implementation == "eager":
189
+ attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
190
+ attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(xq.dtype)
191
+ attn_weights = F.dropout(
192
+ attn_weights,
193
+ p=dropout_p,
194
+ training=self.training
195
+ )
196
+ attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)
197
+
198
+ elif self.attn_implementation == "sdpa":
199
+ if not torch.is_autocast_enabled():
200
+ xv = xv.to(torch.float)
201
+
202
+ attn_output = F.scaled_dot_product_attention(
203
+ xq.transpose(1, 2).contiguous(),
204
+ xk.transpose(1, 2).contiguous(),
205
+ xv.transpose(1, 2).contiguous(),
206
+ attn_mask=attn_mask,
207
+ is_causal=False,
208
+ dropout_p=dropout_p,
209
+ ).transpose(1, 2)
210
+
211
+ elif self.attn_implementation == "flash_attention_2":
212
+ if xq.dtype == torch.float32:
213
+ if torch.is_autocast_enabled():
214
+ target_dtype = torch.get_autocast_gpu_dtype()
215
+ else:
216
+ target_dtype = self.wq.weight.dtype
217
+ attn_output = _flash_attention_forward(
218
+ xq,
219
+ xk,
220
+ xv,
221
+ attention_mask=attn_mask,
222
+ query_length=inputs_q.shape[1],
223
+ is_causal=False,
224
+ dropout=dropout_p,
225
+ softmax_scale=xq.shape[-1] ** -0.5,
226
+ use_top_left_mask=flash_attn_supports_top_left_mask(),
227
+ target_dtype=target_dtype,
228
+ implementation=self.attn_implementation,
229
+ )
230
+ else:
231
+ raise ValueError(f"Attention implementation {self.attn_implementation} not supported")
232
+
233
+ attn_output = attn_output.to(og_dtype)
234
+ attn_output = self._merge_heads(attn_output)
235
+ attn_output = self.wo(attn_output)
236
+ attn_output = self.residual_dropout(attn_output)
237
+
238
+ return attn_output
239
+
240
+
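# A minimal smoke-test sketch for the ViT attention block above; the sizes are
# illustrative and are not the released Molmo2 configuration.
_attn = ViTMultiHeadDotProductAttention(
    hidden_size=1024, num_heads=16, num_key_value_heads=16, head_dim=64,
    attn_implementation="eager",
)
_x = torch.randn(2, 576, 1024)   # (batch, num_patches, hidden_size)
_out = _attn(_x)                 # inputs_kv defaults to inputs_q, i.e. self-attention
assert _out.shape == (2, 576, 1024)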
241
+ class Molmo2VisionBlock(nn.Module):
242
+
243
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
244
+ super().__init__()
245
+ self.attention = ViTMultiHeadDotProductAttention(
246
+ hidden_size=config.hidden_size,
247
+ num_heads=config.num_attention_heads,
248
+ num_key_value_heads=config.num_key_value_heads,
249
+ head_dim=config.head_dim,
250
+ float32_attention=config.float32_attention,
251
+ attention_dropout=config.attention_dropout,
252
+ residual_dropout=config.residual_dropout,
253
+ device=device,
254
+ attn_implementation=config._attn_implementation,
255
+ )
256
+ self.feed_forward = ViTMLP(config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
257
+ self.attention_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
258
+ self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
259
+
260
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
261
+ x = x + self.attention(self.attention_norm(x))
262
+ x = x + self.feed_forward(self.ffn_norm(x))
263
+ return x
264
+
265
+
266
+ class Molmo2VisionBlockCollection(nn.Module):
267
+
268
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
269
+ super().__init__()
270
+ self.config = config
271
+ self.resblocks = nn.ModuleList([
272
+ Molmo2VisionBlock(config, device) for _ in range(config.num_hidden_layers)
273
+ ])
274
+
275
+ def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
276
+ hidden_states = []
277
+ for r in self.resblocks:
278
+ x = r(x)
279
+ hidden_states.append(x)
280
+ return hidden_states
281
+
282
+
283
+ class Molmo2VisionTransformer(nn.Module):
284
+
285
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
286
+ super().__init__()
287
+ self.config = config
288
+
289
+ # positional embeddings
290
+ self.scale = config.hidden_size ** -0.5
291
+ self.num_prefix_tokens: int = 0 # no class embeddings
292
+ self.positional_embedding = nn.Parameter(
293
+ torch.zeros(config.image_num_pos, config.hidden_size, device=device),
294
+ )
295
+
296
+ image_patch_size = config.image_patch_size
297
+ self.patch_embedding = nn.Linear(
298
+ image_patch_size * image_patch_size * 3,
299
+ config.hidden_size,
300
+ bias=True,
301
+ device=device,
302
+ )
303
+
304
+ self.transformer = Molmo2VisionBlockCollection(config, device)
305
+
306
+ def add_pos_emb(self, x: torch.Tensor, patch_num: tuple[int, int]) -> torch.Tensor:
307
+ pos_emb = self.positional_embedding
308
+
309
+ pos_emb = pos_emb.reshape(
310
+ (int(math.sqrt(pos_emb.shape[0])), int(math.sqrt(pos_emb.shape[0])), pos_emb.shape[1])
311
+ )
312
+
313
+ (patch_num_0, patch_num_1) = patch_num
314
+
315
+ if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
316
+ # Derived from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
317
+ # antialias: default True in jax.image.resize
318
+ pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
319
+ pos_emb = F.interpolate(
320
+ pos_emb, size=(patch_num_0, patch_num_1), mode="bicubic", align_corners=False, antialias=True,
321
+ )
322
+ pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)
323
+
324
+ pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
325
+ x = x + pos_emb[None, :, :].to(x.dtype)
326
+ return x
327
+
328
+ def forward(self, x: torch.Tensor, patch_num: Optional[tuple[int, int]] = None) -> list[torch.Tensor]:
329
+ """
330
+ : param x: (batch_size, num_patch, n_pixels)
331
+ """
332
+ if patch_num is None:
333
+ patch_num = self.config.image_num_patch
334
+
335
+ B, N, D = x.shape
336
+
337
+ x = self.patch_embedding(x)
338
+
339
+ # positional embeddings (no class embeddings are used)
340
+ x = self.add_pos_emb(x, patch_num)
341
+
342
+ hidden_states = self.transformer(x)
343
+ return hidden_states
344
+
345
+
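# A sketch of the positional-embedding resize performed in add_pos_emb, assuming an
# illustrative 24x24 learned grid (image_num_pos = 576) resized to an 18x27 patch grid.
_pos = torch.zeros(576, 1024)                                          # (image_num_pos, hidden_size)
_grid = _pos.reshape(24, 24, 1024).unsqueeze(0).permute(0, 3, 1, 2)    # (1, C, 24, 24)
_grid = F.interpolate(_grid, size=(18, 27), mode="bicubic",
                      align_corners=False, antialias=True)             # resize to patch_num
_pos_resized = _grid.permute(0, 2, 3, 1).squeeze(0).reshape(-1, 1024)  # (18 * 27, C)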
346
+ class ImageProjectorMLP(nn.Module):
347
+
348
+ def __init__(
349
+ self,
350
+ input_dim: int,
351
+ hidden_dim: int,
352
+ output_dim: int,
353
+ hidden_act: str,
354
+ device: Union[str, torch.device] = None,
355
+ ):
356
+ super().__init__()
357
+ self.w1 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
358
+ self.w2 = nn.Linear(hidden_dim, output_dim, bias=False, device=device)
359
+ self.w3 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
360
+ self.act = ACT2FN[hidden_act]
361
+
362
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
363
+ return self.w2(self.act(self.w1(x)) * self.w3(x))
364
+
365
+
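# A sketch of the projector above: a SwiGLU-style MLP, w2(act(w1(x)) * w3(x)), mapping
# pooled ViT features to the text model's hidden size; the sizes below are illustrative.
_proj = ImageProjectorMLP(input_dim=1024, hidden_dim=2048, output_dim=3584, hidden_act="silu")
assert _proj(torch.randn(2, 5, 1024)).shape == (2, 5, 3584)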
366
+ class Molmo2VisionBackbone(nn.Module):
367
+ def __init__(self, vit_config: Molmo2VitConfig, adapter_config: Molmo2AdapterConfig):
368
+ super().__init__()
369
+ self.vit_config = vit_config
370
+ self.adapter_config = adapter_config
371
+
372
+ self.vit_layers = []
373
+ for layer in adapter_config.vit_layers:
374
+ if layer >= 0:
375
+ self.vit_layers.append(layer)
376
+ else:
377
+ self.vit_layers.append(layer + vit_config.num_hidden_layers)
378
+
379
+ last_layer_needed = max(self.vit_layers) + 1
380
+ if last_layer_needed < vit_config.num_hidden_layers:
381
+ new_vit_config = deepcopy(vit_config)
382
+ new_vit_config.num_hidden_layers = last_layer_needed
383
+ self.image_vit = Molmo2VisionTransformer(new_vit_config)
384
+ else:
385
+ self.image_vit = Molmo2VisionTransformer(vit_config)
386
+
387
+ self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens
388
+
389
+ pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
390
+ self.image_pooling_2d = ViTMultiHeadDotProductAttention(
391
+ hidden_size=adapter_config.hidden_size,
392
+ num_heads=adapter_config.num_attention_heads,
393
+ num_key_value_heads=adapter_config.num_key_value_heads,
394
+ head_dim=adapter_config.head_dim,
395
+ input_dim=pool_dim,
396
+ float32_attention=adapter_config.float32_attention,
397
+ attention_dropout=adapter_config.attention_dropout,
398
+ residual_dropout=adapter_config.residual_dropout,
399
+ attn_implementation=adapter_config._attn_implementation,
400
+ )
401
+ self.image_projector = ImageProjectorMLP(
402
+ adapter_config.hidden_size,
403
+ adapter_config.intermediate_size,
404
+ adapter_config.text_hidden_size,
405
+ adapter_config.hidden_act,
406
+ )
407
+ self.image_feature_dropout = nn.Dropout(adapter_config.image_feature_dropout)
408
+
409
+ def encode_image(self, images: torch.Tensor) -> torch.Tensor:
410
+ """
411
+ : param images: (batch_size, num_crops, num_patch, n_pixels)
412
+ """
413
+ B, T, N, D = images.shape
414
+ images = images.view(B * T, N, D)
415
+ image_features = self.image_vit(images)
416
+
417
+ features = []
418
+ for layer in self.vit_layers:
419
+ features.append(image_features[layer])
420
+ image_features = torch.cat(features, dim=-1)
421
+
422
+ if self.num_prefix_tokens > 0:
423
+ image_features = image_features[:, 1:]
424
+ image_features = image_features.view(B, T, N, -1)
425
+ return image_features
426
+
427
+ @property
428
+ def dtype(self) -> torch.dtype:
429
+ return self.image_vit.patch_embedding.weight.dtype
430
+
431
+ @property
432
+ def device(self) -> torch.device:
433
+ return self.image_vit.patch_embedding.weight.device
434
+
435
+ def forward(
436
+ self,
437
+ images: torch.Tensor,
438
+ pooled_patches_idx: torch.Tensor,
439
+ ) -> torch.Tensor:
440
+
441
+ # image_features: (batch_size, num_crops(=num_image), num_patch, nximage_emb_dim)
442
+ batch_size, num_image = images.shape[:2]
443
+ images = images.to(device=self.device, dtype=self.dtype)
444
+ image_features = self.encode_image(images)
445
+
446
+ image_features = self.image_feature_dropout(image_features)
447
+ dim = image_features.shape[-1]
448
+ valid = pooled_patches_idx >= 0
449
+ valid_token = torch.any(valid, -1)
450
+
451
+ # Use `pooled_patches_idx` to arrange the features for image pooling
452
+ batch_idx = torch.arange(pooled_patches_idx.shape[0], dtype=torch.long, device=pooled_patches_idx.device)
453
+ batch_idx = torch.tile(batch_idx.view(batch_size, 1, 1), [1, pooled_patches_idx.shape[1], pooled_patches_idx.shape[2]])
454
+
455
+ # Now [batch, num_high_res_features, pool_dim, dim]
456
+ to_pool = image_features.reshape(batch_size, -1, dim)[batch_idx, torch.clip(pooled_patches_idx, 0)]
457
+ to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
458
+ to_pool = to_pool.reshape([-1, pooled_patches_idx.shape[-1], dim])
459
+ if self.adapter_config.pooling_attention_mask:
460
+ attn_mask = valid.reshape([-1, 1, 1, valid.shape[-1]])
461
+ denom = valid.view(-1, to_pool.shape[-2]).float().sum(-1)
462
+ denom = torch.where(denom == 0, 1, denom)
463
+ query = to_pool.sum(-2, keepdim=True) / denom[:, None, None].to(to_pool.dtype)
464
+ else:
465
+ attn_mask = None
466
+ query = to_pool.mean(-2, keepdim=True)
467
+ pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
468
+ pooled_features = pooled_features.reshape([batch_size, -1, pooled_features.shape[-1]])
469
+
470
+ # MLP layer to map the feature.
471
+ pooled_features = self.image_projector(pooled_features)
472
+ return pooled_features.view(-1, pooled_features.shape[-1])[valid_token.flatten()]
473
+
474
+
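# Note on pooled_patches_idx, as inferred from the forward pass above: each row lists
# indices into the flattened (num_crops * num_patch) feature grid that get pooled into one
# visual token; negative entries mark padding and are masked out via
# valid = pooled_patches_idx >= 0 before attention pooling and the final projector.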
475
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
476
+ def rotate_half(x):
477
+ """Rotates half the hidden dims of the input."""
478
+ x1 = x[..., : x.shape[-1] // 2]
479
+ x2 = x[..., x.shape[-1] // 2 :]
480
+ return torch.cat((-x2, x1), dim=-1)
481
+
482
+
483
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
484
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
485
+ """Applies Rotary Position Embedding to the query and key tensors.
486
+
487
+ Args:
488
+ q (`torch.Tensor`): The query tensor.
489
+ k (`torch.Tensor`): The key tensor.
490
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
491
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
492
+ position_ids (`torch.Tensor`, *optional*):
493
+ Deprecated and unused.
494
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
495
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
496
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
497
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
498
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
499
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
500
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
501
+ Returns:
502
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
503
+ """
504
+ cos = cos.unsqueeze(unsqueeze_dim)
505
+ sin = sin.unsqueeze(unsqueeze_dim)
506
+ q_embed = (q * cos) + (rotate_half(q) * sin)
507
+ k_embed = (k * cos) + (rotate_half(k) * sin)
508
+ return q_embed, k_embed
509
+
510
+
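# A quick check of the broadcasting described in the docstring: with zero rotation angles,
# RoPE is the identity. Shapes are illustrative.
_q = torch.randn(1, 32, 8, 128)   # (batch, num_heads, seq, head_dim)
_k = torch.randn(1, 8, 8, 128)    # (batch, num_kv_heads, seq, head_dim)
_cos, _sin = torch.ones(1, 8, 128), torch.zeros(1, 8, 128)
_q_rot, _k_rot = apply_rotary_pos_emb(_q, _k, _cos, _sin)   # unsqueeze_dim=1 broadcasts over heads
assert torch.allclose(_q_rot, _q) and torch.allclose(_k_rot, _k)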
511
+ class Molmo2RotaryEmbedding(nn.Module):
512
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
513
+
514
+ def __init__(
515
+ self,
516
+ config: Molmo2TextConfig,
517
+ device: Union[str, torch.device] = None,
518
+ rope_type: Optional[str] = None,
519
+ ):
520
+ super().__init__()
521
+ if rope_type is not None:
522
+ self.rope_type = rope_type
523
+ elif hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
524
+ # BC: "rope_type" was originally "type"
525
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
526
+ else:
527
+ self.rope_type = "default"
528
+ self.max_seq_len_cached = config.max_position_embeddings
529
+ self.original_max_seq_len = config.max_position_embeddings
530
+
531
+ self.config = config
532
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
533
+
534
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
535
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
536
+ self.original_inv_freq = self.inv_freq
537
+
538
+ @torch.no_grad()
539
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
540
+ def forward(self, x, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
541
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
542
+ position_ids_expanded = position_ids[:, None, :].float()
543
+
544
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
545
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
546
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
547
+ emb = torch.cat((freqs, freqs), dim=-1)
548
+ cos = emb.cos() * self.attention_scaling
549
+ sin = emb.sin() * self.attention_scaling
550
+
551
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
552
+
553
+
554
+ class Molmo2RMSNorm(nn.Module):
555
+
556
+ def __init__(
557
+ self,
558
+ size: int,
559
+ eps: float = 1e-6,
560
+ device: Union[str, torch.device] = None,
561
+ ):
562
+ super().__init__()
563
+ self.weight = nn.Parameter(torch.ones(size, device=device))
564
+ self.eps = eps
565
+
566
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
567
+ with torch.autocast(enabled=False, device_type=x.device.type):
568
+ og_dtype = x.dtype
569
+ x = x.to(torch.float32)
570
+ variance = x.pow(2).mean(-1, keepdim=True)
571
+ x = x * torch.rsqrt(variance + self.eps)
572
+ x = x.to(og_dtype)
573
+
574
+ return self.weight * x
575
+
576
+ def extra_repr(self):
577
+ return f"{tuple(self.weight.shape)}, eps={self.eps}"
578
+
579
+
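# A worked check of the norm above: with the default all-ones weight, Molmo2RMSNorm divides
# by the RMS over the last dimension, e.g. rms([2, 2, 2, 2]) = 2, so the output is all ones.
_norm = Molmo2RMSNorm(4)
assert torch.allclose(_norm(torch.full((1, 4), 2.0)), torch.ones(1, 4), atol=1e-4)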
580
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
581
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
582
+ """
583
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
584
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
585
+ """
586
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
587
+ if n_rep == 1:
588
+ return hidden_states
589
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
590
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
591
+
592
+
593
+ def eager_attention_forward(
594
+ module: nn.Module,
595
+ query: torch.Tensor,
596
+ key: torch.Tensor,
597
+ value: torch.Tensor,
598
+ attention_mask: Optional[torch.Tensor],
599
+ scaling: float,
600
+ dropout: float = 0.0,
601
+ **kwargs,
602
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
603
+ key_states = repeat_kv(key, module.num_key_value_groups)
604
+ value_states = repeat_kv(value, module.num_key_value_groups)
605
+
606
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
607
+ if attention_mask is not None:
608
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
609
+ attn_weights = attn_weights + causal_mask
610
+
611
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
612
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
613
+ attn_output = torch.matmul(attn_weights, value_states)
614
+ attn_output = attn_output.transpose(1, 2).contiguous()
615
+
616
+ return attn_output, attn_weights
617
+
618
+
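# A shape-check sketch for eager_attention_forward with grouped-query attention;
# SimpleNamespace stands in for the attention module and all sizes are illustrative.
from types import SimpleNamespace
_mod = SimpleNamespace(num_key_value_groups=4, training=False)
_q = torch.randn(1, 8, 16, 64)    # (batch, num_heads, seq, head_dim)
_k = torch.randn(1, 2, 16, 64)    # (batch, num_kv_heads, seq, head_dim)
_v = torch.randn(1, 2, 16, 64)
_out, _w = eager_attention_forward(_mod, _q, _k, _v, attention_mask=None, scaling=64 ** -0.5)
assert _out.shape == (1, 16, 8, 64) and _w.shape == (1, 8, 16, 16)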
619
+ class Molmo2Attention(nn.Module):
620
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
621
+
622
+ def __init__(self, config: Molmo2TextConfig, layer_idx: int) -> None:
623
+ super().__init__()
624
+ self.config = config
625
+ self.layer_idx = layer_idx
626
+ self.num_heads = config.num_attention_heads
627
+ self.num_key_value_heads = config.num_key_value_heads
628
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
629
+ self.head_dim = config.head_dim
630
+ self.scaling = self.head_dim**-0.5
631
+ self.is_causal = True
632
+
633
+ self.fused_dims = (
634
+ config.num_attention_heads * config.head_dim,
635
+ config.head_dim * config.num_key_value_heads,
636
+ config.head_dim * config.num_key_value_heads,
637
+ )
638
+ self.att_proj = nn.Linear(
639
+ config.hidden_size,
640
+ sum(self.fused_dims),
641
+ bias=config.qkv_bias,
642
+ )
643
+
644
+ # Layer norms.
645
+ self.k_norm: Optional[Molmo2RMSNorm] = None
646
+ self.q_norm: Optional[Molmo2RMSNorm] = None
647
+ self.qk_norm_type: Optional[str] = None
648
+ if config.use_qk_norm:
649
+ k_norm_size = (
650
+ config.head_dim
651
+ if config.qk_norm_type == "qwen3" else
652
+ config.num_key_value_heads * config.head_dim
653
+ )
654
+ self.k_norm = Molmo2RMSNorm(k_norm_size, eps=config.layer_norm_eps)
655
+ q_norm_size = (
656
+ config.head_dim
657
+ if config.qk_norm_type == "qwen3" else
658
+ config.num_attention_heads * config.head_dim
659
+ )
660
+ self.q_norm = Molmo2RMSNorm(q_norm_size, eps=config.layer_norm_eps)
661
+ self.qk_norm_type = config.qk_norm_type
662
+
663
+ self.attention_dropout = config.attention_dropout
664
+
665
+ self.attn_out = nn.Linear(
666
+ config.head_dim * config.num_attention_heads,
667
+ config.hidden_size,
668
+ bias=False,
669
+ )
670
+
671
+ def forward(
672
+ self,
673
+ hidden_states: torch.Tensor,
674
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
675
+ attention_mask: Optional[torch.Tensor],
676
+ past_key_values: Optional[Cache] = None,
677
+ cache_position: Optional[torch.LongTensor] = None,
678
+ **kwargs: Unpack[FlashAttentionKwargs],
679
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
680
+ input_shape = hidden_states.shape[:-1]
681
+ hidden_shape = (*input_shape, -1, self.head_dim)
682
+
683
+ qkv = self.att_proj(hidden_states)
684
+ query_states, key_states, value_states = qkv.split(self.fused_dims, dim=-1)
685
+ value_states = value_states.view(hidden_shape)
686
+
687
+ # Optionally apply layer norm to keys and queries.
688
+ if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type != "qwen3":
689
+ query_states = self.q_norm(query_states)
690
+ key_states = self.k_norm(key_states)
691
+
692
+ query_states = query_states.view(hidden_shape)
693
+ key_states = key_states.view(hidden_shape)
694
+ if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type == "qwen3":
695
+ query_states = self.q_norm(query_states)
696
+ key_states = self.k_norm(key_states)
697
+ query_states = query_states.transpose(1, 2)
698
+ key_states = key_states.transpose(1, 2)
699
+ value_states = value_states.transpose(1, 2)
700
+
701
+ cos, sin = position_embeddings
702
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
703
+
704
+ if past_key_values is not None:
705
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
706
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
707
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
708
+
709
+ attention_interface: Callable = eager_attention_forward
710
+ if self.config._attn_implementation != "eager":
711
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
712
+
713
+ attn_output, attn_weights = attention_interface(
714
+ self,
715
+ query_states,
716
+ key_states,
717
+ value_states,
718
+ attention_mask,
719
+ dropout=0.0 if not self.training else self.attention_dropout,
720
+ scaling=self.scaling,
721
+ **kwargs,
722
+ )
723
+
724
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
725
+ attn_output = self.attn_out(attn_output)
726
+ return attn_output, attn_weights
727
+
728
+
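# Note on the attention block above: att_proj packs Q, K and V into a single projection,
# and fused_dims records the per-tensor widths so qkv.split(self.fused_dims, dim=-1)
# recovers them. With the illustrative values num_attention_heads=32,
# num_key_value_heads=8, head_dim=128, the split widths would be (4096, 1024, 1024).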
729
+ class LanguageModelMLP(nn.Module):
730
+
731
+ def __init__(
732
+ self,
733
+ input_dim: int,
734
+ intermediate_size: int,
735
+ hidden_act: str,
736
+ device: Union[str, torch.device] = None,
737
+ ):
738
+ super().__init__()
739
+ self.ff_proj = nn.Linear(input_dim, intermediate_size * 2, bias=False, device=device)
740
+ self.ff_out = nn.Linear(intermediate_size, input_dim, bias=False, device=device)
741
+ self.act = ACT2FN[hidden_act]
742
+
743
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
744
+ x = self.ff_proj(x)
745
+ x, gate = x.chunk(2, dim=-1)
746
+ x = self.act(gate) * x
747
+ x = self.ff_out(x)
748
+ return x
749
+
750
+
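# A sketch of the gated MLP above: ff_proj packs the value and gate halves into one
# projection, and the block computes act(gate) * value before ff_out; sizes are illustrative.
_mlp = LanguageModelMLP(input_dim=8, intermediate_size=16, hidden_act="silu")
assert _mlp(torch.randn(2, 3, 8)).shape == (2, 3, 8)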
751
+ class Molmo2DecoderLayer(GradientCheckpointingLayer):
752
+
753
+ def __init__(
754
+ self,
755
+ config: Molmo2TextConfig,
756
+ layer_idx: Optional[int] = None,
757
+ device: Union[str, torch.device] = None
758
+ ):
759
+ super().__init__()
760
+ self.config = config
761
+
762
+ self.self_attn = Molmo2Attention(config, layer_idx)
763
+ self.attn_norm = Molmo2RMSNorm(
764
+ config.hidden_size, eps=config.layer_norm_eps, device=device)
765
+ self.dropout = nn.Dropout(config.residual_dropout)
766
+ self.mlp = LanguageModelMLP(
767
+ config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
768
+ self.ff_norm = Molmo2RMSNorm(
769
+ config.hidden_size, eps=config.layer_norm_eps, device=device)
770
+
771
+ def forward(
772
+ self,
773
+ hidden_states: torch.Tensor,
774
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
775
+ attention_mask: Optional[torch.Tensor] = None,
776
+ position_ids: Optional[torch.LongTensor] = None,
777
+ past_key_values: Optional[Cache] = None,
778
+ output_attentions: Optional[bool] = False,
779
+ use_cache: Optional[bool] = False,
780
+ cache_position: Optional[torch.LongTensor] = None,
781
+ **kwargs: Unpack[TransformersKwargs],
782
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
783
+
784
+ residual = hidden_states
785
+ hidden_states = self.attn_norm(hidden_states)
786
+
787
+ # Self Attention
788
+ hidden_states, self_attn_weights = self.self_attn(
789
+ hidden_states=hidden_states,
790
+ position_embeddings=position_embeddings,
791
+ attention_mask=attention_mask,
792
+ position_ids=position_ids,
793
+ past_key_values=past_key_values,
794
+ output_attentions=output_attentions,
795
+ use_cache=use_cache,
796
+ cache_position=cache_position,
797
+ **kwargs,
798
+ )
799
+
800
+ hidden_states = residual + self.dropout(hidden_states)
801
+
802
+ # Fully Connected
803
+ residual = hidden_states
804
+ hidden_states = self.ff_norm(hidden_states)
805
+ hidden_states = self.mlp(hidden_states)
806
+
807
+ hidden_states = residual + self.dropout(hidden_states)
808
+
809
+ outputs = (hidden_states,)
810
+
811
+ if output_attentions:
812
+ outputs += (self_attn_weights,)
813
+
814
+ return outputs
815
+
816
+
817
+ class Molmo2PostNormDecoderLayer(Molmo2DecoderLayer):
818
+ def forward(
819
+ self,
820
+ hidden_states: torch.Tensor,
821
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
822
+ attention_mask: Optional[torch.Tensor] = None,
823
+ position_ids: Optional[torch.LongTensor] = None,
824
+ past_key_values: Optional[Cache] = None,
825
+ output_attentions: Optional[bool] = False,
826
+ use_cache: Optional[bool] = False,
827
+ cache_position: Optional[torch.LongTensor] = None,
828
+ **kwargs,
829
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
830
+
831
+ residual = hidden_states
832
+
833
+ # Self Attention
834
+ hidden_states, self_attn_weights = self.self_attn(
835
+ hidden_states=hidden_states,
836
+ position_embeddings=position_embeddings,
837
+ attention_mask=attention_mask,
838
+ position_ids=position_ids,
839
+ past_key_values=past_key_values,
840
+ output_attentions=output_attentions,
841
+ use_cache=use_cache,
842
+ cache_position=cache_position,
843
+ )
844
+ hidden_states = self.attn_norm(hidden_states)
845
+
846
+ hidden_states = residual + self.dropout(hidden_states)
847
+
848
+ # Fully Connected
849
+ residual = hidden_states
850
+ hidden_states = self.mlp(hidden_states)
851
+ hidden_states = self.ff_norm(hidden_states)
852
+
853
+ hidden_states = residual + self.dropout(hidden_states)
854
+
855
+ outputs = (hidden_states,)
856
+
857
+ if output_attentions:
858
+ outputs += (self_attn_weights,)
859
+
860
+ return outputs
861
+
862
+
863
+ class Molmo2Embedding(nn.Module):
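+ # Embedding table split into the base vocabulary plus a separately parameterized block for the
+ # additionally added tokens; the two are concatenated at lookup time, presumably so the new rows
+ # can be initialized and trained independently of the pretrained vocabulary.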
864
+ def __init__(
865
+ self,
866
+ num_embeddings: int,
867
+ num_new_embeddings: int,
868
+ features: int,
869
+ device: Union[str, torch.device] = None,
870
+ ):
871
+ super().__init__()
872
+ self.embedding = nn.Parameter(
873
+ torch.zeros(num_embeddings, features, device=device),
874
+ )
875
+ self.new_embedding = nn.Parameter(
876
+ torch.zeros(num_new_embeddings, features, device=device),
877
+ )
878
+
879
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
880
+ return F.embedding(x, torch.cat([self.embedding, self.new_embedding], dim=0))
881
+
882
+
883
+ class Molmo2PreTrainedModel(PreTrainedModel):
884
+ config: Molmo2Config
885
+ base_model_prefix = "model"
886
+ supports_gradient_checkpointing = True
887
+ _no_split_modules = [
888
+ "Molmo2DecoderLayer",
889
+ "Molmo2PostNormDecoderLayer",
890
+ "Molmo2VisionBlock",
891
+ "ViTMultiHeadDotProductAttention",
892
+ ]
893
+ _skip_keys_device_placement = "past_key_values"
894
+ _supports_flash_attn = True
895
+ _supports_sdpa = True
896
+
897
+ _can_compile_fullgraph = True
898
+ _supports_attention_backend = True
899
+ _can_record_outputs = {
900
+ "hidden_states": Molmo2DecoderLayer,
901
+ "attentions": Molmo2Attention,
902
+ }
903
+
904
+ def _init_weights(self, module):
905
+ std = self.config.initializer_range
906
+ if isinstance(module, (nn.Linear,)):
907
+ module.weight.data.normal_(mean=0.0, std=std)
908
+ if module.bias is not None:
909
+ module.bias.data.zero_()
910
+ elif isinstance(module, Molmo2Embedding):
911
+ module.embedding.data.normal_(mean=0.0, std=std)
912
+ module.new_embedding.data.normal_(mean=0.0, std=std)
913
+ elif isinstance(module, nn.Embedding):
914
+ module.weight.data.normal_(mean=0.0, std=std)
915
+ if module.padding_idx is not None:
916
+ module.weight.data[module.padding_idx].zero_()
917
+ elif isinstance(module, Molmo2RMSNorm):
918
+ module.weight.data.fill_(1.0)
919
+ elif isinstance(module, nn.LayerNorm):
920
+ module.weight.data.fill_(1.0)
921
+ if module.bias is not None:
922
+ module.bias.data.zero_()
923
+
924
+
925
+ class Molmo2TextModel(Molmo2PreTrainedModel):
926
+ config: Molmo2TextConfig
927
+ _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]
928
+
929
+ def __init__(self, config: Molmo2TextConfig):
930
+ super().__init__(config)
931
+ if config.additional_vocab_size is not None:
932
+ self.wte = Molmo2Embedding(
933
+ config.vocab_size,
934
+ config.additional_vocab_size,
935
+ config.hidden_size,
936
+ )
937
+ else:
938
+ self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
939
+ self.emb_drop = nn.Dropout(config.embedding_dropout)
940
+ decoder_layer = Molmo2PostNormDecoderLayer if config.norm_after else Molmo2DecoderLayer
941
+ self.blocks = nn.ModuleList(
942
+ [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
943
+ )
944
+ self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
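+ # When `rope_scaling_layers` is set, two rotary embeddings are built: a scaled variant used by
+ # the listed layers and a "default" (unscaled) variant used by all other layers (see `forward`).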
945
+ if config.rope_scaling_layers is not None:
946
+ self.rotary_embs = nn.ModuleDict(
947
+ {
948
+ "default": Molmo2RotaryEmbedding(config, rope_type="default"),
949
+ "scaling": Molmo2RotaryEmbedding(config),
950
+ }
951
+ )
952
+ else:
953
+ self.rotary_emb = Molmo2RotaryEmbedding(config)
954
+ self.gradient_checkpointing = False
955
+
956
+ # Initialize weights and apply final processing
957
+ self.post_init()
958
+
959
+ def get_input_embeddings(self) -> torch.nn.Module:
960
+ return self.wte
961
+
962
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
963
+ self.wte = value
964
+
965
+ @can_return_tuple
966
+ def forward(
967
+ self,
968
+ input_ids: Optional[torch.LongTensor] = None,
969
+ attention_mask: Optional[torch.Tensor] = None,
970
+ position_ids: Optional[torch.LongTensor] = None,
971
+ past_key_values: Optional[Cache] = None,
972
+ inputs_embeds: Optional[torch.FloatTensor] = None,
973
+ use_cache: Optional[bool] = None,
974
+ output_attentions: Optional[bool] = None,
975
+ output_hidden_states: Optional[bool] = None,
976
+ cache_position: Optional[torch.LongTensor] = None,
977
+ **kwargs: Unpack[TransformersKwargs],
978
+ ) -> BaseModelOutputWithPast:
979
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
980
+ output_hidden_states = (
981
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
982
+ )
983
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
984
+
985
+ if (input_ids is None) ^ (inputs_embeds is not None):
986
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
987
+
988
+ if self.gradient_checkpointing and self.training and use_cache:
989
+ logger.warning_once(
990
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
991
+ )
992
+ use_cache = False
993
+
994
+ if inputs_embeds is None:
995
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
996
+ inputs_embeds = self.wte(input_ids)
997
+
998
+ # torch.jit.trace() doesn't support cache objects in the output
999
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
1000
+ past_key_values = DynamicCache(config=self.config)
1001
+
1002
+ if cache_position is None:
1003
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1004
+ cache_position = torch.arange(
1005
+ past_seen_tokens,
1006
+ past_seen_tokens + inputs_embeds.shape[1],
1007
+ device=inputs_embeds.device,
1008
+ )
1009
+
1010
+ if position_ids is None:
1011
+ position_ids = cache_position.unsqueeze(0)
1012
+
1013
+ # It may already have been prepared by e.g. `generate`
1014
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
1015
+ # Prepare mask arguments
1016
+ mask_kwargs = {
1017
+ "config": self.config,
1018
+ "input_embeds": inputs_embeds,
1019
+ "attention_mask": attention_mask,
1020
+ "cache_position": cache_position,
1021
+ "past_key_values": past_key_values,
1022
+ "position_ids": position_ids,
1023
+ }
1024
+
1025
+ # Create the mask
1026
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
1027
+
1028
+ hidden_states = inputs_embeds
1029
+
1030
+ # create position embeddings to be shared across the decoder layers
1031
+ if self.config.rope_scaling_layers is not None:
1032
+ position_embeddings_mapping = {
1033
+ "default": self.rotary_embs["default"](hidden_states, position_ids),
1034
+ "scaling": self.rotary_embs["scaling"](hidden_states, position_ids),
1035
+ }
1036
+ else:
1037
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1038
+
1039
+ # decoder layers
1040
+ all_hidden_states = () if output_hidden_states else None
1041
+ all_self_attns = () if output_attentions else None
1042
+
1043
+ for layer_idx, decoder_block in enumerate(self.blocks[: self.config.num_hidden_layers]):
1044
+ if output_hidden_states:
1045
+ all_hidden_states += (hidden_states,)
1046
+
1047
+ if self.config.rope_scaling_layers is not None:
1048
+ position_embeddings_i = (
1049
+ position_embeddings_mapping["scaling"]
1050
+ if layer_idx in self.config.rope_scaling_layers
1051
+ else position_embeddings_mapping["default"]
1052
+ )
1053
+ else:
1054
+ position_embeddings_i = position_embeddings
1055
+
1056
+ layer_outputs = decoder_block(
1057
+ hidden_states,
1058
+ attention_mask=causal_mask_mapping,
1059
+ position_ids=position_ids,
1060
+ past_key_values=past_key_values,
1061
+ output_attentions=output_attentions,
1062
+ use_cache=use_cache,
1063
+ cache_position=cache_position,
1064
+ position_embeddings=position_embeddings_i,
1065
+ **kwargs,
1066
+ )
1067
+
1068
+ hidden_states = layer_outputs[0]
1069
+
1070
+ if output_attentions:
1071
+ all_self_attns += (layer_outputs[1],)
1072
+
1073
+ hidden_states = self.ln_f(hidden_states)
1074
+
1075
+ # add hidden states from the last decoder layer
1076
+ if output_hidden_states:
1077
+ all_hidden_states += (hidden_states,)
1078
+
1079
+ return BaseModelOutputWithPast(
1080
+ last_hidden_state=hidden_states,
1081
+ past_key_values=past_key_values,
1082
+ hidden_states=all_hidden_states,
1083
+ attentions=all_self_attns,
1084
+ )
1085
+
1086
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1087
+ def token_type_ids_mask_function(
1088
+ token_type_ids: Optional[torch.Tensor] = None,
1089
+ ) -> Optional[Callable]:
1090
+ """
1091
+ Returns a mask function that marks query/key pairs where both the query and the key are image
1092
+ tokens (token_type_ids == 1), so that attention within image blocks is bidirectional.
1093
+ """
1094
+ # Do not return an additional mask in this case
1095
+ if token_type_ids is None:
1096
+ return None
1097
+
1098
+ def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
1099
+ # If it's 1 for both query and key/value, we are in an image block
1100
+ # NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length
1101
+ # Since vmap doesn't support `if statement` we workaround it with `torch.where`
1102
+ safe_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
1103
+ token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_idx]
1104
+ token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)
1105
+
1106
+ is_image_block = (token_type_ids[batch_idx, q_idx] == 1) & (token_type_ids_at_kv_idx == 1)
1107
+
1108
+ # This is bidirectional attention whenever we are dealing with image tokens
1109
+ return is_image_block
1110
+
1111
+ return inner_mask
1112
+
1113
+
1114
+ class Molmo2Model(Molmo2PreTrainedModel):
1115
+ base_model_prefix = ""
1116
+ _checkpoint_conversion_mapping = {}
1117
+ # Reference: fix gemma3 grad acc #37208
1118
+ accepts_loss_kwargs = False
1119
+ config: Molmo2Config
1120
+
1121
+
1122
+ def __init__(self, config: Molmo2Config):
1123
+ super().__init__(config)
1124
+ self.transformer: Molmo2TextModel = Molmo2TextModel(config.text_config)
1125
+ self.vision_backbone: Optional[Molmo2VisionBackbone] = None
1126
+ if config.vit_config is not None and config.adapter_config is not None:
1127
+ self.vision_backbone = Molmo2VisionBackbone(config.vit_config, config.adapter_config)
1128
+
1129
+ # Initialize weights and apply final processing
1130
+ self.post_init()
1131
+
1132
+ def get_input_embeddings(self) -> torch.nn.Module:
1133
+ return self.transformer.wte
1134
+
1135
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1136
+ self.transformer.wte = value
1137
+
1138
+ def set_decoder(self, decoder):
1139
+ self.transformer = decoder
1140
+
1141
+ def get_decoder(self):
1142
+ return self.transformer
1143
+
1144
+ @property
1145
+ def device(self) -> torch.device:
1146
+ return self.transformer.ln_f.weight.device
1147
+
1148
+ def build_batched_images(
1149
+ self,
1150
+ input_ids: torch.LongTensor,
1151
+ pixel_values: torch.Tensor,
1152
+ image_token_pooling: torch.Tensor,
1153
+ image_grids: torch.Tensor,
1154
+ image_num_crops: torch.Tensor,
1155
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1156
+ # 1) Count the number of images in each example
1157
+ raw_counts = (input_ids == self.config.image_end_token_id).sum(1) # [N]
1158
+ # Each image is represented by global view and high-res view
1159
+ # so we divide by 2 to get the number of images
1160
+ counts = raw_counts // 2
1161
+ N = counts.size(0)
1162
+ device = input_ids.device
1163
+
1164
+ # Total number of images in the batch
1165
+ num_images = int(counts.sum().item())
1166
+
1167
+ # Sanity check
1168
+ assert image_grids.size(0) == num_images, \
1169
+ f"Expected {num_images} image grids, but got {image_grids.size(0)}"
1170
+ assert image_num_crops.size(0) == num_images, \
1171
+ f"Expected {num_images} image num crops, but got {image_num_crops.size(0)}"
1172
+
1173
+ # 1-1) Compute per-image pooled patch count from image grids
1174
+ with torch.no_grad():
1175
+ first_prod = image_grids[:, :2].prod(dim=1) # [num_images]
1176
+ second_prod = image_grids[:, 2:].prod(dim=1) # [num_images]
1177
+ num_pooled_patches_per_image = (first_prod + second_prod).to(image_num_crops.dtype) # [num_images]
1178
+
1179
+ # pixel_values: [n_crops, n_patches, pixels_per_patch]
1180
+ n_crops, n_patches, pixels_per_patch = pixel_values.shape
1181
+
1182
+ # 2) Map each image index → example index
1183
+ # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
1184
+ example_ids_for_image = torch.arange(N, device=device).repeat_interleave(counts) # [num_images]
1185
+ assert example_ids_for_image.numel() == num_images
1186
+
1187
+ # 2-1) Compute crops_per_example by summing per-image crop counts
1188
+ crops_per_example = torch.zeros(
1189
+ N, dtype=image_num_crops.dtype, device=image_num_crops.device
1190
+ )
1191
+ crops_per_example.index_add_(0, example_ids_for_image, image_num_crops) # [N]
1192
+
1193
+ # 2-2) Per-image number of patches = (crops per image) * n_patches
1194
+ patches_per_image = image_num_crops * n_patches # [num_images]
1195
+
1196
+ # 2-3) Compute per-example per-image patch offsets
1197
+ counts_list = counts.tolist()
1198
+ index_offset_per_example_list = []
1199
+ offset_img = 0
1200
+ for c in counts_list:
1201
+ per_img_patches = patches_per_image[offset_img:offset_img + c] # [c]
1202
+ # Offsets: [0, img0_total_patches, img0+img1_total_patches, ...]
1203
+ index_offset = [0] + per_img_patches.cumsum(0).tolist()[:-1]
1204
+ index_offset_per_example_list.append(index_offset)
1205
+ offset_img += c
1206
+
1207
+ # 2-4) Compute num_pooled_patches_per_example
1208
+ num_pooled_patches_per_example = torch.zeros(
1209
+ N, dtype=num_pooled_patches_per_image.dtype, device=num_pooled_patches_per_image.device
1210
+ )
1211
+ num_pooled_patches_per_example.index_add_(
1212
+ 0, example_ids_for_image, num_pooled_patches_per_image
1213
+ )
1214
+
1215
+ # Sanity checks
1216
+ total_crops = int(crops_per_example.sum().item())
1217
+ assert total_crops == n_crops, \
1218
+ f"Expected {total_crops} crops, but got {n_crops}"
1219
+
1220
+ total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
1221
+ assert total_num_pooled_patches == image_token_pooling.size(0), \
1222
+ f"Expected {total_num_pooled_patches} pooled patches, but got {image_token_pooling.size(0)}"
1223
+
1224
+ # 3) Build images tensor filled with -1
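+ # (-1 marks padded crop slots for examples with fewer crops than the batch maximum;
+ # these padded slots are presumably ignored downstream by the vision backbone)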
1225
+ M = int(crops_per_example.max().item())
1226
+ images = torch.full(
1227
+ (N, M, n_patches, pixels_per_patch),
1228
+ fill_value=-1,
1229
+ dtype=pixel_values.dtype,
1230
+ device=pixel_values.device,
1231
+ )
1232
+
1233
+ # 4) Fill images with per-example slices from pixel_values
1234
+ offset_crop = 0
1235
+ for i in range(N):
1236
+ num = int(crops_per_example[i].item())
1237
+ cur = pixel_values[offset_crop:offset_crop + num] # [num, n_patches, pixels_per_patch]
1238
+ images[i, :num] = cur
1239
+ offset_crop += num
1240
+
1241
+ # Sanity check
1242
+ assert offset_crop == n_crops
1243
+
1244
+ # 5) Build new_token_pooling tensor filled with -1
1245
+ P = int(num_pooled_patches_per_example.max().item())
1246
+ _, dim = image_token_pooling.shape
1247
+ new_token_pooling = torch.full(
1248
+ (N, P, dim),
1249
+ fill_value=-1,
1250
+ dtype=image_token_pooling.dtype,
1251
+ device=image_token_pooling.device,
1252
+ )
1253
+
1254
+ # 6) Fill token_pooling with per-example slices, adding per-image patch offsets
1255
+ patch_offset = 0
1256
+ img_offset = 0
1257
+
1258
+ for i, c in enumerate(counts_list):
1259
+ num_patches = int(num_pooled_patches_per_example[i].item())
1260
+
1261
+ # Subsequence of pooled tokens belonging to this example
1262
+ cur = image_token_pooling[patch_offset:patch_offset + num_patches].clone() # [num_patches, dim]
1263
+
1264
+ index_offset_per_example = index_offset_per_example_list[i] # length = c
1265
+ per_img_pooled = num_pooled_patches_per_image[img_offset:img_offset + c] # [c]
1266
+
1267
+ assert len(index_offset_per_example) == per_img_pooled.numel()
1268
+
1269
+ # Apply per-image offsets to the (ragged) subsequence
1270
+ offset = 0
1271
+ for j in range(c):
1272
+ index_offset = int(index_offset_per_example[j])
1273
+ n = int(per_img_pooled[j].item())
1274
+ cur_slice = cur[offset:offset + n]
1275
+
1276
+ # Apply offset across all columns
1277
+ cur[offset:offset + n] = torch.where(
1278
+ cur_slice >= 0,
1279
+ cur_slice + index_offset,
1280
+ cur_slice,
1281
+ )
1282
+ offset += n
1283
+
1284
+ new_token_pooling[i, :num_patches] = cur
1285
+
1286
+ patch_offset += num_patches
1287
+ img_offset += c
1288
+
1289
+ # Final sanity checks
1290
+ assert patch_offset == total_num_pooled_patches
1291
+ assert img_offset == num_images
1292
+
1293
+ return images, new_token_pooling
1294
+
1295
+ def build_batched_videos(
1296
+ self,
1297
+ input_ids: torch.LongTensor,
1298
+ pixel_values_videos: torch.Tensor,
1299
+ video_token_pooling: torch.Tensor,
1300
+ video_grids: torch.Tensor,
1301
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1302
+
1303
+ # 1) Count the number of videos in each example
1304
+ if self.config.use_frame_special_tokens:
1305
+ end_token_id = self.config.frame_end_token_id
1306
+ else:
1307
+ end_token_id = self.config.image_end_token_id
1308
+ counts = (input_ids == end_token_id).any(dim=1).long() # [N]
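+ # `any` (rather than a count) is used because at most one video per example is supported,
+ # matching the processor's single-video assertion.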
1309
+ N = counts.size(0)
1310
+ device = input_ids.device
1311
+
1312
+ # Total number of videos in the batch
1313
+ num_videos = int(counts.sum().item())
1314
+
1315
+ # Sanity check
1316
+ assert video_grids.size(0) == num_videos, \
1317
+ f"Expected {num_videos} videos, but got {video_grids.size(0)}"
1318
+
1319
+ video_num_frames = video_grids[:, 0] # [num_videos]
1320
+ num_pooled_patches_per_video = video_grids.prod(dim=1) # [num_videos]
1321
+
1322
+ # pixel_values_videos: [n_frames, n_patches, pixels_per_patch]
1323
+ n_frames, n_patches, pixels_per_patch = pixel_values_videos.shape
1324
+
1325
+ # 2) Map each video index -> example index
1326
+ # Example: if counts = [2, 1, 3], then this becomes [0,0,1,2,2,2]
1327
+ example_ids_for_video = torch.arange(N, device=device).repeat_interleave(counts) # [num_videos]
1328
+ assert example_ids_for_video.numel() == num_videos
1329
+
1330
+ # 2-1) Compute frames_per_example by summing per-video frame counts
1331
+ frames_per_example = torch.zeros(
1332
+ N, dtype=video_num_frames.dtype, device=device,
1333
+ )
1334
+ frames_per_example.index_add_(0, example_ids_for_video, video_num_frames) # [N]
1335
+
1336
+ # 2-2) Compute num_pooled_patches_per_example
1337
+ num_pooled_patches_per_example = torch.zeros(
1338
+ N, dtype=num_pooled_patches_per_video.dtype, device=num_pooled_patches_per_video.device,
1339
+ )
1340
+ num_pooled_patches_per_example.index_add_(
1341
+ 0, example_ids_for_video, num_pooled_patches_per_video,
1342
+ )
1343
+
1344
+ # Sanity checks
1345
+ total_frames = int(frames_per_example.sum().item())
1346
+ assert total_frames == n_frames, \
1347
+ f"Expected {total_frames} frames, but got {n_frames}"
1348
+
1349
+ total_num_pooled_patches = int(num_pooled_patches_per_example.sum().item())
1350
+ assert total_num_pooled_patches == video_token_pooling.size(0), \
1351
+ f"Expected {total_num_pooled_patches} pooled patches, but got {video_token_pooling.size(0)}"
1352
+
1353
+ # 3) Build videos tensor filled with -1
1354
+ M = int(frames_per_example.max().item())
1355
+ videos = torch.full(
1356
+ (N, M, n_patches, pixels_per_patch),
1357
+ fill_value=-1,
1358
+ dtype=pixel_values_videos.dtype,
1359
+ device=device,
1360
+ )
1361
+
1362
+ # 4) Fill videos with per-example slices from pixel_values_videos
1363
+ offset_frame = 0
1364
+ for i in range(N):
1365
+ num = int(frames_per_example[i].item())
1366
+ cur = pixel_values_videos[offset_frame:offset_frame + num] # [num, n_patches, pixels_per_patch]
1367
+ videos[i, :num] = cur
1368
+ offset_frame += num
1369
+
1370
+ # Sanity check
1371
+ assert offset_frame == n_frames
1372
+
1373
+ # 5) Build new token_pooling tensor filled with -1
1374
+ P = int(num_pooled_patches_per_example.max().item())
1375
+ _, dim = video_token_pooling.shape
1376
+ new_token_pooling = torch.full(
1377
+ (N, P, dim),
1378
+ fill_value=-1,
1379
+ dtype=video_token_pooling.dtype,
1380
+ device=video_token_pooling.device,
1381
+ )
1382
+
1383
+ # 6) Fill new token_pooling with per-example slices from video_token_pooling
1384
+ patch_offset = 0
1385
+ for i in range(N):
1386
+ num_patches = int(num_pooled_patches_per_example[i].item())
1387
+ cur = video_token_pooling[patch_offset:patch_offset + num_patches] # [num_patches, dim]
1388
+ new_token_pooling[i, :num_patches] = cur
1389
+ patch_offset += num_patches
1390
+
1391
+ # Final sanity checks
1392
+ assert patch_offset == total_num_pooled_patches
1393
+
1394
+ return videos, new_token_pooling
1395
+
1396
+ def merge_visual_inputs(
1397
+ self,
1398
+ input_ids: Optional[torch.LongTensor] = None,
1399
+ pixel_values: Optional[torch.Tensor] = None,
1400
+ image_token_pooling: Optional[torch.Tensor] = None,
1401
+ image_grids: Optional[torch.Tensor] = None,
1402
+ image_num_crops: Optional[torch.Tensor] = None,
1403
+ pixel_values_videos: Optional[torch.Tensor] = None,
1404
+ video_token_pooling: Optional[torch.Tensor] = None,
1405
+ video_grids: Optional[torch.Tensor] = None,
1406
+ ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
1407
+ if pixel_values is not None and pixel_values_videos is not None:
1408
+ raise ValueError("pixel_values and pixel_values_videos are provided at the same time")
1409
+ elif pixel_values is not None:
1410
+ assert input_ids is not None
1411
+ images, token_pooling = self.build_batched_images(
1412
+ input_ids=input_ids,
1413
+ pixel_values=pixel_values,
1414
+ image_token_pooling=image_token_pooling,
1415
+ image_grids=image_grids,
1416
+ image_num_crops=image_num_crops,
1417
+ )
1418
+ elif pixel_values_videos is not None:
1419
+ assert input_ids is not None
1420
+ images, token_pooling = self.build_batched_videos(
1421
+ input_ids=input_ids,
1422
+ pixel_values_videos=pixel_values_videos,
1423
+ video_token_pooling=video_token_pooling,
1424
+ video_grids=video_grids,
1425
+ )
1426
+ else:
1427
+ images, token_pooling = None, None
1428
+ return images, token_pooling
1429
+
1430
+ def build_input_embeddings(
1431
+ self,
1432
+ input_ids: torch.LongTensor,
1433
+ images: Optional[torch.FloatTensor] = None, # image inputs
1434
+ token_pooling: Optional[torch.LongTensor] = None,
1435
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
1436
+
1437
+ # Get embeddings of input.
1438
+ # shape: (batch_size, seq_len, d_model)
1439
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
1440
+ x = self.transformer.wte(input_ids)
1441
+
1442
+ image_features: Optional[torch.FloatTensor] = None
1443
+ if images is not None:
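+ # Visual features are added in place to the placeholder <im_patch> token embeddings,
+ # one feature vector per image-patch position.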
1444
+ image_features = self.vision_backbone(images, token_pooling).to(x.device)
1445
+ is_image_patch = input_ids.view(-1) == self.config.image_patch_id
1446
+ assert is_image_patch.sum() == len(image_features)
1447
+ x.view(-1, x.shape[-1])[is_image_patch] += image_features
1448
+
1449
+ # shape: (batch_size, seq_len, d_model)
1450
+ x = self.transformer.emb_drop(x) # type: ignore
1451
+
1452
+ return x, image_features
1453
+
1454
+ @can_return_tuple
1455
+ def forward(
1456
+ self,
1457
+ input_ids: Optional[torch.LongTensor] = None,
1458
+ pixel_values: Optional[torch.FloatTensor] = None,
1459
+ image_token_pooling: Optional[torch.Tensor] = None,
1460
+ image_grids: Optional[torch.Tensor] = None,
1461
+ image_num_crops: Optional[torch.Tensor] = None,
1462
+ pixel_values_videos: Optional[torch.Tensor] = None,
1463
+ video_token_pooling: Optional[torch.Tensor] = None,
1464
+ video_grids: Optional[torch.Tensor] = None,
1465
+ attention_mask: Optional[torch.Tensor] = None,
1466
+ position_ids: Optional[torch.Tensor] = None,
1467
+ past_key_values: Optional[Cache] = None,
1468
+ token_type_ids: Optional[torch.LongTensor] = None,
1469
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1470
+ use_cache: Optional[bool] = None,
1471
+ output_attentions: Optional[bool] = None,
1472
+ output_hidden_states: Optional[bool] = None,
1473
+ cache_position: Optional[torch.LongTensor] = None,
1474
+ **kwargs: Unpack[TransformersKwargs],
1475
+ ) -> Union[tuple, Molmo2ModelOutputWithPast]:
1476
+
1477
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1478
+ output_hidden_states = (
1479
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1480
+ )
1481
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1482
+
1483
+ if (input_ids is None) ^ (inputs_embeds is not None):
1484
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1485
+
1486
+ images, token_pooling = self.merge_visual_inputs(
1487
+ input_ids=input_ids,
1488
+ pixel_values=pixel_values,
1489
+ image_token_pooling=image_token_pooling,
1490
+ image_grids=image_grids,
1491
+ image_num_crops=image_num_crops,
1492
+ pixel_values_videos=pixel_values_videos,
1493
+ video_token_pooling=video_token_pooling,
1494
+ video_grids=video_grids,
1495
+ )
1496
+
1497
+ if images is not None and inputs_embeds is not None:
1498
+ raise ValueError(
1499
+ "You cannot specify both images and inputs_embeds at the same time."
1500
+ )
1501
+
1502
+ if inputs_embeds is None:
1503
+ inputs_embeds, image_features = self.build_input_embeddings(
1504
+ input_ids, images, token_pooling,
1505
+ )
1506
+
1507
+ if cache_position is None:
1508
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1509
+ cache_position = torch.arange(
1510
+ past_seen_tokens,
1511
+ past_seen_tokens + inputs_embeds.shape[1],
1512
+ device=inputs_embeds.device,
1513
+ )
1514
+
1515
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1516
+ # It may already have been prepared by e.g. `generate`
1517
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
1518
+ # Prepare mask arguments
1519
+ mask_kwargs = {
1520
+ "config": self.config.get_text_config(),
1521
+ "input_embeds": inputs_embeds,
1522
+ "attention_mask": attention_mask,
1523
+ "cache_position": cache_position,
1524
+ "past_key_values": past_key_values,
1525
+ "position_ids": position_ids,
1526
+ }
1527
+
1528
+ # NOTE: this `is_prefill` logic is not flawless, it fails when we're using a cache eagerly initialized
1529
+ # (e.g. compiled prefill) AND `images` are not provided. Determining prefill in that case requires
1530
+ # checking data values, which is not compile-compatible.
1531
+ is_prefill = (
1532
+ not use_cache
1533
+ or past_key_values is None
1534
+ or not past_key_values.is_initialized
1535
+ or images is not None
1536
+ )
1537
+ if token_type_ids is not None and is_prefill:
1538
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1539
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1540
+ token_type_ids.to(cache_position.device)
1541
+ )
1542
+
1543
+ # Create the mask
1544
+ causal_mask_mapping = create_causal_mask(**mask_kwargs)
1545
+
1546
+ outputs = self.transformer(
1547
+ attention_mask=causal_mask_mapping,
1548
+ position_ids=position_ids,
1549
+ past_key_values=past_key_values,
1550
+ inputs_embeds=inputs_embeds,
1551
+ use_cache=use_cache,
1552
+ output_attentions=output_attentions,
1553
+ output_hidden_states=output_hidden_states,
1554
+ cache_position=cache_position,
1555
+ **kwargs,
1556
+ )
1557
+
1558
+ return Molmo2ModelOutputWithPast(
1559
+ last_hidden_state=outputs.last_hidden_state,
1560
+ past_key_values=outputs.past_key_values,
1561
+ hidden_states=outputs.hidden_states,
1562
+ attentions=outputs.attentions,
1563
+ image_hidden_states=image_features if images is not None else None,
1564
+ )
1565
+
1566
+
1567
+ class Molmo2ForConditionalGeneration(Molmo2PreTrainedModel, GenerationMixin):
1568
+ _checkpoint_conversion_mapping = {}
1569
+ _tied_weights_keys = [] # Weights are not tied
1570
+ # Reference: fix gemma3 grad acc #37208
1571
+ accepts_loss_kwargs = False
1572
+ config: Molmo2Config
1573
+
1574
+ def __init__(self, config: Molmo2Config):
1575
+ super().__init__(config)
1576
+
1577
+ self.model = Molmo2Model(config)
1578
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1579
+ self.vocab_size = config.vocab_size
1580
+
1581
+ # Initialize weights and apply final processing
1582
+ self.post_init()
1583
+
1584
+ def get_input_embeddings(self) -> torch.nn.Module:
1585
+ return self.model.transformer.wte
1586
+
1587
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1588
+ self.model.transformer.wte = value
1589
+
1590
+ def set_decoder(self, decoder):
1591
+ self.model.set_decoder(decoder)
1592
+
1593
+ def get_decoder(self):
1594
+ return self.model.get_decoder()
1595
+
1596
+ # Make modules available through the conditional class for BC
1597
+ @property
1598
+ def language_model(self) -> torch.nn.Module:
1599
+ return self.model.transformer
1600
+
1601
+ @property
1602
+ def vision_backbone(self) -> torch.nn.Module:
1603
+ return self.model.vision_backbone
1604
+
1605
+ @can_return_tuple
1606
+ def forward(
1607
+ self,
1608
+ input_ids: torch.LongTensor = None,
1609
+ pixel_values: Optional[torch.Tensor] = None,
1610
+ image_token_pooling: Optional[torch.Tensor] = None,
1611
+ image_grids: Optional[torch.Tensor] = None,
1612
+ image_num_crops: Optional[torch.Tensor] = None,
1613
+ pixel_values_videos: Optional[torch.Tensor] = None,
1614
+ video_token_pooling: Optional[torch.Tensor] = None,
1615
+ video_grids: Optional[torch.Tensor] = None,
1616
+ attention_mask: Optional[torch.Tensor] = None,
1617
+ position_ids: Optional[torch.LongTensor] = None,
1618
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
1619
+ token_type_ids: Optional[torch.LongTensor] = None,
1620
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1621
+ labels: Optional[torch.LongTensor] = None,
1622
+ use_cache: Optional[bool] = None,
1623
+ output_attentions: Optional[bool] = None,
1624
+ output_hidden_states: Optional[bool] = None,
1625
+ cache_position: Optional[torch.LongTensor] = None,
1626
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1627
+ **kwargs: Unpack[TransformersKwargs],
1628
+ ) -> Union[tuple, Molmo2CausalLMOutputWithPast]:
1629
+ r"""
1630
+ ```python
1631
+ >>> from PIL import Image
1632
+ >>> import requests
1633
+ >>> from transformers import AutoProcessor, Molmo2ForConditionalGeneration
1634
+
1635
+ >>> model = Molmo2ForConditionalGeneration.from_pretrained("...")
1636
+ >>> processor = AutoProcessor.from_pretrained("...")
1637
+
1638
+ >>> prompt = "What's the content of the image?"
1639
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1640
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1641
+
1642
+ >>> messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image", "image": image}]}]
1643
+
1644
+ >>> inputs = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True)
1645
+
1646
+ >>> # Generate
1647
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
1648
+ >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
1649
+ >>> processor.post_process_image_text_to_text(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1650
+ "The image shows a bustling street scene in what appears to be a Chinatown area. There's ..."
1651
+ ```"""
1652
+ outputs = self.model(
1653
+ input_ids=input_ids,
1654
+ pixel_values=pixel_values,
1655
+ image_token_pooling=image_token_pooling,
1656
+ image_grids=image_grids,
1657
+ image_num_crops=image_num_crops,
1658
+ pixel_values_videos=pixel_values_videos,
1659
+ video_token_pooling=video_token_pooling,
1660
+ video_grids=video_grids,
1661
+ attention_mask=attention_mask,
1662
+ position_ids=position_ids,
1663
+ past_key_values=past_key_values,
1664
+ token_type_ids=token_type_ids,
1665
+ inputs_embeds=inputs_embeds,
1666
+ use_cache=use_cache,
1667
+ output_attentions=output_attentions,
1668
+ output_hidden_states=output_hidden_states,
1669
+ cache_position=cache_position,
1670
+ **kwargs,
1671
+ )
1672
+
1673
+ hidden_states = outputs.last_hidden_state
1674
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1675
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1676
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1677
+
1678
+ loss = None
1679
+ if labels is not None:
1680
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)
1681
+
1682
+ return Molmo2CausalLMOutputWithPast(
1683
+ loss=loss,
1684
+ logits=logits,
1685
+ past_key_values=outputs.past_key_values,
1686
+ hidden_states=outputs.hidden_states,
1687
+ attentions=outputs.attentions,
1688
+ image_hidden_states=outputs.image_hidden_states,
1689
+ )
1690
+
1691
+ def prepare_inputs_for_generation(
1692
+ self,
1693
+ input_ids: torch.LongTensor,
1694
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
1695
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1696
+ pixel_values: Optional[torch.FloatTensor] = None,
1697
+ image_token_pooling: Optional[torch.Tensor] = None,
1698
+ image_grids: Optional[torch.Tensor] = None,
1699
+ image_num_crops: Optional[torch.Tensor] = None,
1700
+ pixel_values_videos: Optional[torch.Tensor] = None,
1701
+ video_token_pooling: Optional[torch.Tensor] = None,
1702
+ video_grids: Optional[torch.Tensor] = None,
1703
+ attention_mask: Optional[torch.Tensor] = None,
1704
+ token_type_ids: Optional[torch.LongTensor] = None,
1705
+ cache_position: Optional[torch.LongTensor] = None,
1706
+ logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
1707
+ **kwargs,
1708
+ ):
1709
+
1710
+ model_inputs = super().prepare_inputs_for_generation(
1711
+ input_ids,
1712
+ past_key_values=past_key_values,
1713
+ inputs_embeds=inputs_embeds,
1714
+ attention_mask=attention_mask,
1715
+ cache_position=cache_position,
1716
+ logits_to_keep=logits_to_keep,
1717
+ token_type_ids=token_type_ids,
1718
+ **kwargs,
1719
+ )
1720
+
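+ # Vision inputs are only forwarded on the prefill step (cache_position[0] == 0); during
+ # decoding they are not needed because the image tokens are already represented in the KV cache.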
1721
+ if cache_position[0] == 0:
1722
+ model_inputs["pixel_values"] = pixel_values
1723
+ model_inputs["image_token_pooling"] = image_token_pooling
1724
+ model_inputs["image_grids"] = image_grids
1725
+ model_inputs["image_num_crops"] = image_num_crops
1726
+ model_inputs["pixel_values_videos"] = pixel_values_videos
1727
+ model_inputs["video_token_pooling"] = video_token_pooling
1728
+ model_inputs["video_grids"] = video_grids
1729
+
1730
+ return model_inputs
1731
+
1732
+ # Adapted from transformers.models.gemma3.modeling_gemma3
1733
+ @staticmethod
1734
+ def create_masks_for_generate(
1735
+ config: PretrainedConfig,
1736
+ input_embeds: torch.Tensor,
1737
+ attention_mask: Optional[torch.Tensor],
1738
+ cache_position: torch.Tensor,
1739
+ past_key_values: Optional[Cache],
1740
+ position_ids: Optional[torch.Tensor],
1741
+ token_type_ids: Optional[torch.Tensor] = None,
1742
+ **kwargs,
1743
+ ) -> dict:
1744
+ # Prepare mask arguments
1745
+ mask_kwargs = {
1746
+ "config": config.get_text_config(),
1747
+ "input_embeds": input_embeds,
1748
+ "attention_mask": attention_mask,
1749
+ "cache_position": cache_position,
1750
+ "past_key_values": past_key_values,
1751
+ "position_ids": position_ids,
1752
+ }
1753
+ # Add the token type ids mask for generate as well
1754
+ if token_type_ids is not None and input_embeds.shape[1] != 1:
1755
+ # We need to pass an additional mask function to account for token type ids, and it needs to be an `or`
1756
+ mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
1757
+ token_type_ids.to(cache_position.device)
1758
+ )
1759
+
1760
+ return create_masks_for_generate(**mask_kwargs)
1761
+
1762
+
1763
+ # Always register for multi-modal features
1764
+ AutoModelForImageTextToText.register(Molmo2Config, Molmo2ForConditionalGeneration)
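+ # With this registration the checkpoint can be loaded through the Auto classes, e.g.
+ # AutoModelForImageTextToText.from_pretrained(<repo_id>, trust_remote_code=True).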
preprocessor_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoImageProcessor": "image_processing_molmo2.Molmo2ImageProcessor",
4
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
5
+ },
6
+ "do_convert_rgb": true,
7
+ "image_mean": [
8
+ 0.5,
9
+ 0.5,
10
+ 0.5
11
+ ],
12
+ "image_processor_type": "Molmo2ImageProcessor",
13
+ "image_std": [
14
+ 0.5,
15
+ 0.5,
16
+ 0.5
17
+ ],
18
+ "max_crops": 8,
19
+ "overlap_margins": [
20
+ 4,
21
+ 4
22
+ ],
23
+ "patch_size": 14,
24
+ "pooling_size": [
25
+ 2,
26
+ 2
27
+ ],
28
+ "processor_class": "Molmo2Processor",
29
+ "resample": 2,
30
+ "size": {
31
+ "height": 378,
32
+ "width": 378
33
+ }
34
+ }
processing_molmo2.py ADDED
@@ -0,0 +1,403 @@
1
+ """
2
+ Processor class for Molmo2.
3
+ """
4
+ from typing import Optional, Union
5
+ import dataclasses
6
+
7
+ import numpy as np
8
+
9
+ from transformers.image_utils import ImageInput
10
+ from transformers.video_utils import VideoInput
11
+ from transformers.processing_utils import (
12
+ Unpack,
13
+ ProcessingKwargs,
14
+ ProcessorMixin,
15
+ )
16
+ from transformers.feature_extraction_utils import BatchFeature
17
+ from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
18
+ from transformers.utils import logging
19
+
20
+ from transformers import AutoTokenizer
21
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, Molmo2ImageProcessor
22
+ from .video_processing_molmo2 import Molmo2VideoProcessorKwargs, Molmo2VideoProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ # Special tokens, these should be present in any tokenizer we use since the preprocessor uses them
29
+ IMAGE_PATCH_TOKEN = "<im_patch>" # Where to insert high-res tokens
30
+ IMAGE_LOW_RES_TOKEN = "<im_low>" # Where to insert low-res tokens
31
+ IM_START_TOKEN = "<im_start>"
32
+ LOW_RES_IMAGE_START_TOKEN = "<low_res_im_start>"
33
+ FRAME_START_TOKEN = "<frame_start>"
34
+ IM_END_TOKEN = "<im_end>"
35
+ FRAME_END_TOKEN = "<frame_end>"
36
+ IM_COL_TOKEN = "<im_col>"
37
+ IMAGE_PROMPT = "<|image|>"
38
+ VIDEO_PROMPT = "<|video|>"
39
+
40
+ IMAGE_TOKENS = [
41
+ IMAGE_PATCH_TOKEN,
42
+ IM_COL_TOKEN,
43
+ IM_START_TOKEN,
44
+ LOW_RES_IMAGE_START_TOKEN,
45
+ FRAME_START_TOKEN,
46
+ IM_END_TOKEN,
47
+ FRAME_END_TOKEN,
48
+ IMAGE_LOW_RES_TOKEN,
49
+ ]
50
+
51
+
52
+ class Molmo2ProcessorKwargs(ProcessingKwargs, total=False):
53
+ """Molmo2 processor kwargs"""
54
+ images_kwargs: Molmo2ImagesKwargs
55
+ videos_kwargs: Molmo2VideoProcessorKwargs
56
+ _defaults = {
57
+ "text_kwargs": {
58
+ "padding": False,
59
+ "return_mm_token_type_ids": True,
60
+ },
61
+ "videos_kwargs": {"return_metadata": True},
62
+ }
63
+
64
+
65
+ class Molmo2Processor(ProcessorMixin):
66
+ attributes = ["image_processor", "video_processor", "tokenizer"]
67
+ optional_attributes = [
68
+ "chat_template",
69
+ "time_mode",
70
+ "image_use_col_tokens",
71
+ "use_single_crop_col_tokens",
72
+ "use_single_crop_start_token",
73
+ "video_use_col_tokens",
74
+ "use_frame_special_tokens",
75
+ ]
76
+ image_processor_class = "AutoImageProcessor"
77
+ video_processor_class = "AutoVideoProcessor"
78
+ tokenizer_class = "AutoTokenizer"
79
+
80
+ def __init__(
81
+ self,
82
+ image_processor: Molmo2ImageProcessor = None,
83
+ video_processor: Molmo2VideoProcessor = None,
84
+ tokenizer: AutoTokenizer = None,
85
+ chat_template: Optional[str] = None,
86
+ image_use_col_tokens: Optional[bool] = True,
87
+ use_single_crop_col_tokens: Optional[bool] = None,
88
+ use_single_crop_start_token: Optional[bool] = True,
89
+ video_use_col_tokens: Optional[bool] = False,
90
+ use_frame_special_tokens: Optional[bool] = True,
91
+ **kwargs
92
+ ) -> None:
93
+ super().__init__(
94
+ image_processor,
95
+ video_processor,
96
+ tokenizer,
97
+ chat_template=chat_template,
98
+ image_use_col_tokens=image_use_col_tokens,
99
+ use_single_crop_col_tokens=use_single_crop_col_tokens,
100
+ use_single_crop_start_token=use_single_crop_start_token,
101
+ video_use_col_tokens=video_use_col_tokens,
102
+ use_frame_special_tokens=use_frame_special_tokens,
103
+ )
104
+
105
+ self.image_placeholder_token = IMAGE_PROMPT
106
+ self.video_placeholder_token = VIDEO_PROMPT
107
+ self.image_token_ids = [
108
+ tokenizer.convert_tokens_to_ids(token)
109
+ for token in IMAGE_TOKENS
110
+ ]
111
+
112
+ def get_image_tokens(self, image_grid: np.ndarray):
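+ # Builds the placeholder-token layout for one image from its grid (resized_h, resized_w, height, width):
+ # a low-res block of resized_h x resized_w patch tokens followed by a high-res block of
+ # height x width patch tokens, each wrapped in start/end tokens with optional column tokens.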
113
+ resized_h, resized_w, height, width = image_grid
114
+ per_row = np.full(width, IMAGE_PATCH_TOKEN)
115
+ if self.image_use_col_tokens:
116
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
117
+ joint = [
118
+ [IM_START_TOKEN],
119
+ np.tile(per_row, [height]),
120
+ [IM_END_TOKEN],
121
+ ]
122
+ per_row = np.full(resized_w, IMAGE_PATCH_TOKEN)
123
+ use_single_crop_col_tokens = (
124
+ self.image_use_col_tokens
125
+ if self.use_single_crop_col_tokens is None
126
+ else self.use_single_crop_col_tokens
127
+ )
128
+ image_start_token = (
129
+ LOW_RES_IMAGE_START_TOKEN
130
+ if self.use_single_crop_start_token
131
+ else IM_START_TOKEN
132
+ )
133
+ if use_single_crop_col_tokens:
134
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
135
+ joint = [
136
+ [image_start_token],
137
+ np.tile(per_row, [resized_h]),
138
+ [IM_END_TOKEN],
139
+ ] + joint
140
+
141
+ return np.concatenate(joint)
142
+
143
+ def get_video_string(
144
+ self,
145
+ video_grid: np.ndarray,
146
+ timestamps: np.ndarray,
147
+ ):
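+ # Renders one video as text: each frame is prefixed with its timestamp ("per-frame-compact"
+ # time mode) and followed by a start token, h x w patch tokens (plus optional column tokens),
+ # and an end token.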
148
+ if self.use_frame_special_tokens:
149
+ start_token_id = FRAME_START_TOKEN
150
+ end_token_id = FRAME_END_TOKEN
151
+ else:
152
+ start_token_id = IM_START_TOKEN
153
+ end_token_id = IM_END_TOKEN
154
+
155
+ num_frames, h, w = video_grid
156
+ video_string: str = ""
157
+ for frame_idx, frame_time in enumerate(timestamps):
158
+ # `per-frame-compact` time mode
159
+ prev_space = " " if frame_idx > 0 else ""
160
+ frame_prefix = prev_space + f"{frame_time:.1f} " # explicit whitespace before/after image tokens
161
+
162
+ video_string += frame_prefix
163
+ per_row = np.full(w, IMAGE_PATCH_TOKEN)
164
+ if self.video_use_col_tokens:
165
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
166
+ extra_tokens = np.tile(per_row, [h])
167
+ video_tokens = [
168
+ [start_token_id],
169
+ extra_tokens,
170
+ [end_token_id],
171
+ ]
172
+ video_string += "".join(np.concatenate(video_tokens, 0))
173
+
174
+ return video_string
175
+
176
+ def insert_bos(
177
+ self,
178
+ input_ids: np.ndarray,
179
+ attention_mask: np.ndarray,
180
+ bos_token_id: int,
181
+ pad_token_id: int,
182
+ ):
183
+ """
184
+ Args:
185
+ input_ids: [B, S] array with left padding
186
+ attention_mask: [B, S] array (0 for pad, 1 for valid)
187
+ bos_token_id: int
188
+ pad_token_id: int
189
+ Returns:
190
+ input_ids_out: [B, S] or [B, S+1] array with bos inserted if needed
191
+ attention_mask_out: same shape as input_ids_out
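+ Example (illustrative, with pad_token_id=0 and bos_token_id=1):
+ input_ids=[[0, 5, 6]], attention_mask=[[0, 1, 1]] -> [[0, 1, 5, 6]], [[0, 1, 1, 1]]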
192
+ """
193
+
194
+ need_to_expand = len(input_ids.shape) == 1
195
+ if need_to_expand:
196
+ input_ids = input_ids[None, :]
197
+ attention_mask = attention_mask[None, :]
198
+
199
+ B, S = input_ids.shape
200
+
201
+ # Handle zero-length sequence
202
+ if S == 0:
203
+ new_input_ids = np.full((B, 1), bos_token_id, dtype=input_ids.dtype)
204
+ new_attention_mask = np.ones((B, 1), dtype=attention_mask.dtype)
205
+ if need_to_expand:
206
+ new_input_ids = new_input_ids[0]
207
+ new_attention_mask = new_attention_mask[0]
208
+ return new_input_ids, new_attention_mask
209
+
210
+ first_valid_index = (attention_mask == 1).argmax(axis=-1) # [B]
211
+ bos_already_present = np.all(input_ids[np.arange(B), first_valid_index] == bos_token_id)
212
+
213
+ if bos_already_present:
214
+ if need_to_expand:
215
+ input_ids = input_ids[0]
216
+ attention_mask = attention_mask[0]
217
+ return input_ids, attention_mask
218
+ else:
219
+ new_input_ids = np.full((B, S+1), pad_token_id, dtype=input_ids.dtype)
220
+ new_attention_mask = np.zeros((B, S+1), dtype=attention_mask.dtype)
221
+
222
+ src_idx = np.tile(np.arange(S), (B, 1)) # [B, S]
223
+ valid_mask = src_idx >= first_valid_index[:, None] # [B, S]
224
+ tgt_idx = src_idx + 1 # shift right by one position to make room for the BOS token
225
+ batch_idx = np.tile(np.arange(B)[:, None], (1, S)) # [B, S]
226
+
227
+ # flatten valid_positions
228
+ flat_vals = input_ids[valid_mask]
229
+ flat_batch = batch_idx[valid_mask]
230
+ flat_tgt = tgt_idx[valid_mask]
231
+
232
+ new_input_ids[flat_batch, flat_tgt] = flat_vals
233
+ new_attention_mask[flat_batch, flat_tgt] = 1
234
+
235
+ insert_pos = first_valid_index
236
+ new_input_ids[np.arange(B), insert_pos] = bos_token_id
237
+ new_attention_mask[np.arange(B), insert_pos] = 1
238
+
239
+ if need_to_expand:
240
+ new_input_ids = new_input_ids[0]
241
+ new_attention_mask = new_attention_mask[0]
242
+
243
+ return new_input_ids, new_attention_mask
244
+
245
+ def __call__(
246
+ self,
247
+ text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
248
+ images: ImageInput = None,
249
+ videos: VideoInput = None,
250
+ **kwargs: Unpack[Molmo2ProcessorKwargs],
251
+ ) -> BatchFeature:
252
+ """
253
+
254
+ Args:
255
+ text (`str`, `list[str]`, `list[list[str]]`):
256
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
257
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
258
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
259
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
260
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
261
+ tensor. Both channels-first and channels-last formats are supported.
262
+ videos (`dict[str, Any]` or `list[dict[str, Any]]`):
263
+ The video or batch of videos to be prepared. Each video can be a dictionary with the following keys:
264
+ - `"frames"`: `np.ndarray` of shape (T, H, W, 3)
265
+ - `"timestamps"`: `np.ndarray` of shape (T,)
266
+ - `"sampled_fps"`: `float` (optional)
267
+ - `"sampling_augmentation"`: `str` (optional)
268
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
269
+ If set, will return tensors of a particular framework. Acceptable values are:
270
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
271
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
272
+ - `'np'`: Return NumPy `np.ndarray` objects.
273
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
274
+
275
+ Returns:
276
+ `BatchFeature`: A [`BatchFeature`] with the following fields:
277
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
278
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
279
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`).
280
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
281
+ - **image_token_pooling** -- Indices of the patches in `image_grids` to pool for each token in `image_tokens`.
282
+ Returned when `images` is not `None`.
283
+ - **image_grids** -- Grids of images. Returned when `images` is not `None`.
284
+ - **image_num_crops** -- Number of crops for each image. Returned when `images` is not `None`.
285
+ - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
286
+ - **video_token_pooling** -- Indices of the patches in `video_grids` to pool for each token in `video_tokens`.
287
+ Returned when `videos` is not `None`.
288
+ - **video_grids** -- Grids of videos. Returned when `videos` is not `None`.
289
+ """
290
+
291
+ output_kwargs = self._merge_kwargs(
292
+ Molmo2ProcessorKwargs,
293
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
294
+ **kwargs,
295
+ )
296
+
297
+ if images is not None:
298
+ image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
299
+ image_grids = image_inputs["image_grids"]
300
+ else:
301
+ image_inputs = {}
302
+ image_grids = None
303
+
304
+ if videos is not None:
305
+ videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
306
+ video_grids = videos_inputs["video_grids"]
307
+ # If user has not requested video metadata, pop it
308
+ if "return_metadata" not in kwargs:
309
+ video_metadata = videos_inputs.pop("video_metadata")
310
+ else:
311
+ video_metadata = videos_inputs["video_metadata"]
312
+ else:
313
+ videos_inputs = {}
314
+ video_grids = None
315
+
316
+ if not isinstance(text, list):
317
+ text = [text]
318
+
319
+ text = text.copy() # the replacement loops below modify text in place
320
+
321
+ if image_grids is not None:
322
+ index = 0
323
+ for i in range(len(text)):
324
+ num_images = text[i].count(self.image_placeholder_token)
325
+ image_grids_i = image_grids[index:index+num_images]
326
+ for image_grid in image_grids_i:
327
+ image_tokens = self.get_image_tokens(image_grid)
328
+ image_string = "".join(image_tokens)
329
+ text[i] = text[i].replace(self.image_placeholder_token, image_string, 1)
330
+ index += num_images
331
+
332
+ if video_grids is not None:
333
+ index = 0
334
+ for i in range(len(text)):
335
+ num_videos = text[i].count(self.video_placeholder_token)
336
+ assert num_videos in {0, 1}, "At most one video is supported for now"
337
+ video_grids_i = video_grids[index:index+num_videos]
338
+ metadata_i = video_metadata[index:index+num_videos]
339
+ for video_grid, metadata in zip(video_grids_i, metadata_i):
340
+ video_string = self.get_video_string(
341
+ video_grid,
342
+ metadata.timestamps,
343
+ )
344
+ text[i] = text[i].replace(self.video_placeholder_token, video_string, 1)
345
+ index += num_videos
346
+
347
+ return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
348
+ return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
349
+ text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
350
+
351
+ input_ids = text_inputs["input_ids"]
352
+ attention_mask = text_inputs["attention_mask"]
353
+
354
+ input_ids = np.array(input_ids)
355
+ attention_mask = np.array(attention_mask)
356
+
357
+ bos = self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
358
+ input_ids, attention_mask = self.insert_bos(
359
+ input_ids, attention_mask, bos, self.tokenizer.pad_token_id
360
+ )
361
+
362
+ if return_mm_token_type_ids:
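+ # token_type_ids == 1 marks positions holding image/video special tokens; the model uses
+ # this to enable bidirectional attention within image blocks.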
363
+ image_tokens = np.array(self.image_token_ids).astype(input_ids.dtype)
364
+ token_type_ids = np.any(input_ids[:, :, None] == image_tokens[None, None, :], axis=-1)
365
+ text_inputs["token_type_ids"] = token_type_ids.tolist()
366
+
367
+ text_inputs["input_ids"] = input_ids.tolist()
368
+ text_inputs["attention_mask"] = attention_mask.tolist()
369
+
370
+ return BatchFeature(
371
+ data={**text_inputs, **image_inputs, **videos_inputs},
372
+ tensor_type=return_tensors,
373
+ )
374
+
375
+ def post_process_image_text_to_text(
376
+ self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
377
+ ):
378
+ """
379
+ Post-process the output of the model to decode the text.
380
+
381
+ Args:
382
+ generated_outputs (`torch.Tensor` or `np.ndarray`):
383
+ The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
384
+ or `(sequence_length,)`.
385
+ skip_special_tokens (`bool`, *optional*, defaults to `True`):
386
+ Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
387
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
388
+ Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
389
+ **kwargs:
390
+ Additional arguments to be passed to the tokenizer's `batch_decode` method.
391
+
392
+ Returns:
393
+ `list[str]`: The decoded text.
394
+ """
395
+ return self.tokenizer.batch_decode(
396
+ generated_outputs,
397
+ skip_special_tokens=skip_special_tokens,
398
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
399
+ **kwargs,
400
+ )
401
+
402
+
403
+ Molmo2Processor.register_for_auto_class()
processor_config.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
4
+ },
5
+ "image_use_col_tokens": true,
6
+ "processor_class": "Molmo2Processor",
7
+ "use_frame_special_tokens": false,
8
+ "use_single_crop_col_tokens": false,
9
+ "use_single_crop_start_token": true,
10
+ "video_use_col_tokens": false
11
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,296 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "|<EXTRA_TOKENS_0>|",
4
+ "|<EXTRA_TOKENS_1>|",
5
+ "|<EXTRA_TOKENS_2>|",
6
+ "|<EXTRA_TOKENS_3>|",
7
+ "|<EXTRA_TOKENS_4>|",
8
+ "|<EXTRA_TOKENS_5>|",
9
+ "|<EXTRA_TOKENS_6>|",
10
+ "|<EXTRA_TOKENS_7>|",
11
+ "|<EXTRA_TOKENS_8>|",
12
+ "|<EXTRA_TOKENS_9>|",
13
+ "|<EXTRA_TOKENS_10>|",
14
+ "|<EXTRA_TOKENS_11>|",
15
+ "|<EXTRA_TOKENS_12>|",
16
+ "|<EXTRA_TOKENS_13>|",
17
+ "|<EXTRA_TOKENS_14>|",
18
+ "|<EXTRA_TOKENS_15>|",
19
+ "|<EXTRA_TOKENS_16>|",
20
+ "|<EXTRA_TOKENS_17>|",
21
+ "|<EXTRA_TOKENS_18>|",
22
+ "|<EXTRA_TOKENS_19>|",
23
+ "|<EXTRA_TOKENS_20>|",
24
+ "|<EXTRA_TOKENS_21>|",
25
+ "|<EXTRA_TOKENS_22>|",
26
+ "|<EXTRA_TOKENS_23>|",
27
+ "|<EXTRA_TOKENS_24>|",
28
+ "|<EXTRA_TOKENS_25>|",
29
+ "|<EXTRA_TOKENS_26>|",
30
+ "|<EXTRA_TOKENS_27>|",
31
+ "|<EXTRA_TOKENS_28>|",
32
+ "|<EXTRA_TOKENS_29>|",
33
+ "|<EXTRA_TOKENS_30>|",
34
+ "|<EXTRA_TOKENS_31>|",
35
+ "|<EXTRA_TOKENS_32>|",
36
+ "|<EXTRA_TOKENS_33>|",
37
+ "|<EXTRA_TOKENS_34>|",
38
+ "|<EXTRA_TOKENS_35>|",
39
+ "|<EXTRA_TOKENS_36>|",
40
+ "|<EXTRA_TOKENS_37>|",
41
+ "|<EXTRA_TOKENS_38>|",
42
+ "|<EXTRA_TOKENS_39>|",
43
+ "|<EXTRA_TOKENS_40>|",
44
+ "|<EXTRA_TOKENS_41>|",
45
+ "|<EXTRA_TOKENS_42>|",
46
+ "|<EXTRA_TOKENS_43>|",
47
+ "|<EXTRA_TOKENS_44>|",
48
+ "|<EXTRA_TOKENS_45>|",
49
+ "|<EXTRA_TOKENS_46>|",
50
+ "|<EXTRA_TOKENS_47>|",
51
+ "|<EXTRA_TOKENS_48>|",
52
+ "|<EXTRA_TOKENS_49>|",
53
+ "|<EXTRA_TOKENS_50>|",
54
+ "|<EXTRA_TOKENS_51>|",
55
+ "|<EXTRA_TOKENS_52>|",
56
+ "|<EXTRA_TOKENS_53>|",
57
+ "|<EXTRA_TOKENS_54>|",
58
+ "|<EXTRA_TOKENS_55>|",
59
+ "|<EXTRA_TOKENS_56>|",
60
+ "|<EXTRA_TOKENS_57>|",
61
+ "|<EXTRA_TOKENS_58>|",
62
+ "|<EXTRA_TOKENS_59>|",
63
+ "|<EXTRA_TOKENS_60>|",
64
+ "|<EXTRA_TOKENS_61>|",
65
+ "|<EXTRA_TOKENS_62>|",
66
+ "|<EXTRA_TOKENS_63>|",
67
+ "|<EXTRA_TOKENS_64>|",
68
+ "|<EXTRA_TOKENS_65>|",
69
+ "|<EXTRA_TOKENS_66>|",
70
+ "|<EXTRA_TOKENS_67>|",
71
+ "|<EXTRA_TOKENS_68>|",
72
+ "|<EXTRA_TOKENS_69>|",
73
+ "|<EXTRA_TOKENS_70>|",
74
+ "|<EXTRA_TOKENS_71>|",
75
+ "|<EXTRA_TOKENS_72>|",
76
+ "|<EXTRA_TOKENS_73>|",
77
+ "|<EXTRA_TOKENS_74>|",
78
+ "|<EXTRA_TOKENS_75>|",
79
+ "|<EXTRA_TOKENS_76>|",
80
+ "|<EXTRA_TOKENS_77>|",
81
+ "|<EXTRA_TOKENS_78>|",
82
+ "|<EXTRA_TOKENS_79>|",
83
+ "|<EXTRA_TOKENS_80>|",
84
+ "|<EXTRA_TOKENS_81>|",
85
+ "|<EXTRA_TOKENS_82>|",
86
+ "|<EXTRA_TOKENS_83>|",
87
+ "|<EXTRA_TOKENS_84>|",
88
+ "|<EXTRA_TOKENS_85>|",
89
+ "|<EXTRA_TOKENS_86>|",
90
+ "|<EXTRA_TOKENS_87>|",
91
+ "|<EXTRA_TOKENS_88>|",
92
+ "|<EXTRA_TOKENS_89>|",
93
+ "|<EXTRA_TOKENS_90>|",
94
+ "|<EXTRA_TOKENS_91>|",
95
+ "|<EXTRA_TOKENS_92>|",
96
+ "|<EXTRA_TOKENS_93>|",
97
+ "|<EXTRA_TOKENS_94>|",
98
+ "|<EXTRA_TOKENS_95>|",
99
+ "|<EXTRA_TOKENS_96>|",
100
+ "|<EXTRA_TOKENS_97>|",
101
+ "|<EXTRA_TOKENS_98>|",
102
+ "|<EXTRA_TOKENS_99>|",
103
+ "|<EXTRA_TOKENS_100>|",
104
+ "|<EXTRA_TOKENS_101>|",
105
+ "|<EXTRA_TOKENS_102>|",
106
+ "|<EXTRA_TOKENS_103>|",
107
+ "|<EXTRA_TOKENS_104>|",
108
+ "|<EXTRA_TOKENS_105>|",
109
+ "|<EXTRA_TOKENS_106>|",
110
+ "|<EXTRA_TOKENS_107>|",
111
+ "|<EXTRA_TOKENS_108>|",
112
+ "|<EXTRA_TOKENS_109>|",
113
+ "|<EXTRA_TOKENS_110>|",
114
+ "|<EXTRA_TOKENS_111>|",
115
+ "|<EXTRA_TOKENS_112>|",
116
+ "|<EXTRA_TOKENS_113>|",
117
+ "|<EXTRA_TOKENS_114>|",
118
+ "|<EXTRA_TOKENS_115>|",
119
+ "|<EXTRA_TOKENS_116>|",
120
+ "|<EXTRA_TOKENS_117>|",
121
+ "|<EXTRA_TOKENS_118>|",
122
+ "|<EXTRA_TOKENS_119>|",
123
+ "|<EXTRA_TOKENS_120>|",
124
+ "|<EXTRA_TOKENS_121>|",
125
+ "|<EXTRA_TOKENS_122>|",
126
+ "|<EXTRA_TOKENS_123>|",
127
+ "|<EXTRA_TOKENS_124>|",
128
+ "|<EXTRA_TOKENS_125>|",
129
+ "|<EXTRA_TOKENS_126>|",
130
+ "|<EXTRA_TOKENS_127>|",
131
+ "|<EXTRA_TOKENS_128>|",
132
+ "|<EXTRA_TOKENS_129>|",
133
+ "|<EXTRA_TOKENS_130>|",
134
+ "|<EXTRA_TOKENS_131>|",
135
+ "|<EXTRA_TOKENS_132>|",
136
+ "|<EXTRA_TOKENS_133>|",
137
+ "|<EXTRA_TOKENS_134>|",
138
+ "|<EXTRA_TOKENS_135>|",
139
+ "|<EXTRA_TOKENS_136>|",
140
+ "|<EXTRA_TOKENS_137>|",
141
+ "|<EXTRA_TOKENS_138>|",
142
+ "|<EXTRA_TOKENS_139>|",
143
+ "|<EXTRA_TOKENS_140>|",
144
+ "|<EXTRA_TOKENS_141>|",
145
+ "|<EXTRA_TOKENS_142>|",
146
+ "|<EXTRA_TOKENS_143>|",
147
+ "|<EXTRA_TOKENS_144>|",
148
+ "|<EXTRA_TOKENS_145>|",
149
+ "|<EXTRA_TOKENS_146>|",
150
+ "|<EXTRA_TOKENS_147>|",
151
+ "|<EXTRA_TOKENS_148>|",
152
+ "|<EXTRA_TOKENS_149>|",
153
+ "|<EXTRA_TOKENS_150>|",
154
+ "|<EXTRA_TOKENS_151>|",
155
+ "|<EXTRA_TOKENS_152>|",
156
+ "|<EXTRA_TOKENS_153>|",
157
+ "|<EXTRA_TOKENS_154>|",
158
+ "|<EXTRA_TOKENS_155>|",
159
+ "|<EXTRA_TOKENS_156>|",
160
+ "|<EXTRA_TOKENS_157>|",
161
+ "|<EXTRA_TOKENS_158>|",
162
+ "|<EXTRA_TOKENS_159>|",
163
+ "|<EXTRA_TOKENS_160>|",
164
+ "|<EXTRA_TOKENS_161>|",
165
+ "|<EXTRA_TOKENS_162>|",
166
+ "|<EXTRA_TOKENS_163>|",
167
+ "|<EXTRA_TOKENS_164>|",
168
+ "|<EXTRA_TOKENS_165>|",
169
+ "|<EXTRA_TOKENS_166>|",
170
+ "|<EXTRA_TOKENS_167>|",
171
+ "|<EXTRA_TOKENS_168>|",
172
+ "|<EXTRA_TOKENS_169>|",
173
+ "|<EXTRA_TOKENS_170>|",
174
+ "|<EXTRA_TOKENS_171>|",
175
+ "|<EXTRA_TOKENS_172>|",
176
+ "|<EXTRA_TOKENS_173>|",
177
+ "|<EXTRA_TOKENS_174>|",
178
+ "|<EXTRA_TOKENS_175>|",
179
+ "|<EXTRA_TOKENS_176>|",
180
+ "|<EXTRA_TOKENS_177>|",
181
+ "|<EXTRA_TOKENS_178>|",
182
+ "|<EXTRA_TOKENS_179>|",
183
+ "|<EXTRA_TOKENS_180>|",
184
+ "|<EXTRA_TOKENS_181>|",
185
+ "|<EXTRA_TOKENS_182>|",
186
+ "|<EXTRA_TOKENS_183>|",
187
+ "|<EXTRA_TOKENS_184>|",
188
+ "|<EXTRA_TOKENS_185>|",
189
+ "|<EXTRA_TOKENS_186>|",
190
+ "|<EXTRA_TOKENS_187>|",
191
+ "|<EXTRA_TOKENS_188>|",
192
+ "|<EXTRA_TOKENS_189>|",
193
+ "|<EXTRA_TOKENS_190>|",
194
+ "|<EXTRA_TOKENS_191>|",
195
+ "|<EXTRA_TOKENS_192>|",
196
+ "|<EXTRA_TOKENS_193>|",
197
+ "|<EXTRA_TOKENS_194>|",
198
+ "|<EXTRA_TOKENS_195>|",
199
+ "|<EXTRA_TOKENS_196>|",
200
+ "|<EXTRA_TOKENS_197>|",
201
+ "|<EXTRA_TOKENS_198>|",
202
+ "|<EXTRA_TOKENS_199>|",
203
+ "|<EXTRA_TOKENS_200>|",
204
+ "|<EXTRA_TOKENS_201>|",
205
+ "|<EXTRA_TOKENS_202>|",
206
+ "|<EXTRA_TOKENS_203>|",
207
+ "|<EXTRA_TOKENS_204>|",
208
+ "|<EXTRA_TOKENS_205>|",
209
+ "|<EXTRA_TOKENS_206>|",
210
+ "|<EXTRA_TOKENS_207>|",
211
+ "|<EXTRA_TOKENS_208>|",
212
+ "|<EXTRA_TOKENS_209>|",
213
+ "|<EXTRA_TOKENS_210>|",
214
+ "|<EXTRA_TOKENS_211>|",
215
+ "|<EXTRA_TOKENS_212>|",
216
+ "|<EXTRA_TOKENS_213>|",
217
+ "|<EXTRA_TOKENS_214>|",
218
+ "|<EXTRA_TOKENS_215>|",
219
+ "|<EXTRA_TOKENS_216>|",
220
+ "|<EXTRA_TOKENS_217>|",
221
+ "|<EXTRA_TOKENS_218>|",
222
+ "|<EXTRA_TOKENS_219>|",
223
+ "|<EXTRA_TOKENS_220>|",
224
+ "|<EXTRA_TOKENS_221>|",
225
+ "|<EXTRA_TOKENS_222>|",
226
+ "|<EXTRA_TOKENS_223>|",
227
+ "|<EXTRA_TOKENS_224>|",
228
+ "|<EXTRA_TOKENS_225>|",
229
+ "|<EXTRA_TOKENS_226>|",
230
+ "|<EXTRA_TOKENS_227>|",
231
+ "|<EXTRA_TOKENS_228>|",
232
+ "|<EXTRA_TOKENS_229>|",
233
+ "|<EXTRA_TOKENS_230>|",
234
+ "|<EXTRA_TOKENS_231>|",
235
+ "|<EXTRA_TOKENS_232>|",
236
+ "|<EXTRA_TOKENS_233>|",
237
+ "|<EXTRA_TOKENS_234>|",
238
+ "|<EXTRA_TOKENS_235>|",
239
+ "|<EXTRA_TOKENS_236>|",
240
+ "|<EXTRA_TOKENS_237>|",
241
+ "|<EXTRA_TOKENS_238>|",
242
+ "|<EXTRA_TOKENS_239>|",
243
+ "|<EXTRA_TOKENS_240>|",
244
+ "|<EXTRA_TOKENS_241>|",
245
+ "|<EXTRA_TOKENS_242>|",
246
+ "|<EXTRA_TOKENS_243>|",
247
+ "|<EXTRA_TOKENS_244>|",
248
+ "|<EXTRA_TOKENS_245>|",
249
+ "|<EXTRA_TOKENS_246>|",
250
+ "|<EXTRA_TOKENS_247>|",
251
+ "|<EXTRA_TOKENS_248>|",
252
+ "|<EXTRA_TOKENS_249>|",
253
+ "|<EXTRA_TOKENS_250>|",
254
+ "|<EXTRA_TOKENS_251>|",
255
+ "|<EXTRA_TOKENS_252>|",
256
+ "|<EXTRA_TOKENS_253>|",
257
+ "|<EXTRA_TOKENS_254>|",
258
+ "|<EXTRA_TOKENS_255>|",
259
+ "|<EXTRA_TOKENS_256>|",
260
+ "|<EXTRA_TOKENS_257>|",
261
+ "|<EXTRA_TOKENS_258>|",
262
+ "|<EXTRA_TOKENS_259>|",
263
+ "|<EXTRA_TOKENS_260>|",
264
+ "|<EXTRA_TOKENS_261>|",
265
+ "|<EXTRA_TOKENS_262>|",
266
+ "|<EXTRA_TOKENS_263>|",
267
+ "|<EXTRA_TOKENS_264>|",
268
+ "|<EXTRA_TOKENS_265>|",
269
+ "|<EXTRA_TOKENS_266>|",
270
+ "<im_start>",
271
+ "<im_end>",
272
+ "<im_patch>",
273
+ "<im_col>",
274
+ "<low_res_im_start>",
275
+ "<|image|>",
276
+ "<im_low>",
277
+ "<frame_start>",
278
+ "<frame_end>",
279
+ "<|video|>"
280
+ ],
281
+ "bos_token": "<|im_end|>",
282
+ "eos_token": {
283
+ "content": "<|im_end|>",
284
+ "lstrip": false,
285
+ "normalized": false,
286
+ "rstrip": false,
287
+ "single_word": false
288
+ },
289
+ "pad_token": {
290
+ "content": "<|endoftext|>",
291
+ "lstrip": false,
292
+ "normalized": false,
293
+ "rstrip": false,
294
+ "single_word": false
295
+ }
296
+ }
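
A quick way to sanity-check this map is to confirm that the multimodal markers it registers (e.g. `<im_start>`, `<im_patch>`, `<frame_start>`) are treated as single special tokens and are therefore stripped when decoding with `skip_special_tokens=True`, matching the default of the decode helper earlier in this commit. A minimal sketch, with the Hub repo id an assumption:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("allenai/Molmo2-8B")  # assumed repo id

markers = ["<im_start>", "<im_patch>", "<im_end>"]
ids = tok.convert_tokens_to_ids(markers)
print(ids)  # each marker maps to exactly one vocabulary id

# Registered special tokens are dropped by skip_special_tokens on decode.
print(tok.decode(ids, skip_special_tokens=True))   # -> ""
print(tok.decode(ids, skip_special_tokens=False))  # -> the markers, left in place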
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95e80901c901584f416b8fd4349fd60022774b89ba4377626511f0562cc599f7
3
+ size 11477017
tokenizer_config.json ADDED
@@ -0,0 +1,2723 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ },
213
+ "151669": {
214
+ "content": "|<EXTRA_TOKENS_0>|",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false,
219
+ "special": true
220
+ },
221
+ "151670": {
222
+ "content": "|<EXTRA_TOKENS_1>|",
223
+ "lstrip": false,
224
+ "normalized": false,
225
+ "rstrip": false,
226
+ "single_word": false,
227
+ "special": true
228
+ },
229
+ "151671": {
230
+ "content": "|<EXTRA_TOKENS_2>|",
231
+ "lstrip": false,
232
+ "normalized": false,
233
+ "rstrip": false,
234
+ "single_word": false,
235
+ "special": true
236
+ },
237
+ "151672": {
238
+ "content": "|<EXTRA_TOKENS_3>|",
239
+ "lstrip": false,
240
+ "normalized": false,
241
+ "rstrip": false,
242
+ "single_word": false,
243
+ "special": true
244
+ },
245
+ "151673": {
246
+ "content": "|<EXTRA_TOKENS_4>|",
247
+ "lstrip": false,
248
+ "normalized": false,
249
+ "rstrip": false,
250
+ "single_word": false,
251
+ "special": true
252
+ },
253
+ "151674": {
254
+ "content": "|<EXTRA_TOKENS_5>|",
255
+ "lstrip": false,
256
+ "normalized": false,
257
+ "rstrip": false,
258
+ "single_word": false,
259
+ "special": true
260
+ },
261
+ "151675": {
262
+ "content": "|<EXTRA_TOKENS_6>|",
263
+ "lstrip": false,
264
+ "normalized": false,
265
+ "rstrip": false,
266
+ "single_word": false,
267
+ "special": true
268
+ },
269
+ "151676": {
270
+ "content": "|<EXTRA_TOKENS_7>|",
271
+ "lstrip": false,
272
+ "normalized": false,
273
+ "rstrip": false,
274
+ "single_word": false,
275
+ "special": true
276
+ },
277
+ "151677": {
278
+ "content": "|<EXTRA_TOKENS_8>|",
279
+ "lstrip": false,
280
+ "normalized": false,
281
+ "rstrip": false,
282
+ "single_word": false,
283
+ "special": true
284
+ },
285
+ "151678": {
286
+ "content": "|<EXTRA_TOKENS_9>|",
287
+ "lstrip": false,
288
+ "normalized": false,
289
+ "rstrip": false,
290
+ "single_word": false,
291
+ "special": true
292
+ },
293
+ "151679": {
294
+ "content": "|<EXTRA_TOKENS_10>|",
295
+ "lstrip": false,
296
+ "normalized": false,
297
+ "rstrip": false,
298
+ "single_word": false,
299
+ "special": true
300
+ },
301
+ "151680": {
302
+ "content": "|<EXTRA_TOKENS_11>|",
303
+ "lstrip": false,
304
+ "normalized": false,
305
+ "rstrip": false,
306
+ "single_word": false,
307
+ "special": true
308
+ },
309
+ "151681": {
310
+ "content": "|<EXTRA_TOKENS_12>|",
311
+ "lstrip": false,
312
+ "normalized": false,
313
+ "rstrip": false,
314
+ "single_word": false,
315
+ "special": true
316
+ },
317
+ "151682": {
318
+ "content": "|<EXTRA_TOKENS_13>|",
319
+ "lstrip": false,
320
+ "normalized": false,
321
+ "rstrip": false,
322
+ "single_word": false,
323
+ "special": true
324
+ },
325
+ "151683": {
326
+ "content": "|<EXTRA_TOKENS_14>|",
327
+ "lstrip": false,
328
+ "normalized": false,
329
+ "rstrip": false,
330
+ "single_word": false,
331
+ "special": true
332
+ },
333
+ "151684": {
334
+ "content": "|<EXTRA_TOKENS_15>|",
335
+ "lstrip": false,
336
+ "normalized": false,
337
+ "rstrip": false,
338
+ "single_word": false,
339
+ "special": true
340
+ },
341
+ "151685": {
342
+ "content": "|<EXTRA_TOKENS_16>|",
343
+ "lstrip": false,
344
+ "normalized": false,
345
+ "rstrip": false,
346
+ "single_word": false,
347
+ "special": true
348
+ },
349
+ "151686": {
350
+ "content": "|<EXTRA_TOKENS_17>|",
351
+ "lstrip": false,
352
+ "normalized": false,
353
+ "rstrip": false,
354
+ "single_word": false,
355
+ "special": true
356
+ },
357
+ "151687": {
358
+ "content": "|<EXTRA_TOKENS_18>|",
359
+ "lstrip": false,
360
+ "normalized": false,
361
+ "rstrip": false,
362
+ "single_word": false,
363
+ "special": true
364
+ },
365
+ "151688": {
366
+ "content": "|<EXTRA_TOKENS_19>|",
367
+ "lstrip": false,
368
+ "normalized": false,
369
+ "rstrip": false,
370
+ "single_word": false,
371
+ "special": true
372
+ },
373
+ "151689": {
374
+ "content": "|<EXTRA_TOKENS_20>|",
375
+ "lstrip": false,
376
+ "normalized": false,
377
+ "rstrip": false,
378
+ "single_word": false,
379
+ "special": true
380
+ },
381
+ "151690": {
382
+ "content": "|<EXTRA_TOKENS_21>|",
383
+ "lstrip": false,
384
+ "normalized": false,
385
+ "rstrip": false,
386
+ "single_word": false,
387
+ "special": true
388
+ },
389
+ "151691": {
390
+ "content": "|<EXTRA_TOKENS_22>|",
391
+ "lstrip": false,
392
+ "normalized": false,
393
+ "rstrip": false,
394
+ "single_word": false,
395
+ "special": true
396
+ },
397
+ "151692": {
398
+ "content": "|<EXTRA_TOKENS_23>|",
399
+ "lstrip": false,
400
+ "normalized": false,
401
+ "rstrip": false,
402
+ "single_word": false,
403
+ "special": true
404
+ },
405
+ "151693": {
406
+ "content": "|<EXTRA_TOKENS_24>|",
407
+ "lstrip": false,
408
+ "normalized": false,
409
+ "rstrip": false,
410
+ "single_word": false,
411
+ "special": true
412
+ },
413
+ "151694": {
414
+ "content": "|<EXTRA_TOKENS_25>|",
415
+ "lstrip": false,
416
+ "normalized": false,
417
+ "rstrip": false,
418
+ "single_word": false,
419
+ "special": true
420
+ },
421
+ "151695": {
422
+ "content": "|<EXTRA_TOKENS_26>|",
423
+ "lstrip": false,
424
+ "normalized": false,
425
+ "rstrip": false,
426
+ "single_word": false,
427
+ "special": true
428
+ },
429
+ "151696": {
430
+ "content": "|<EXTRA_TOKENS_27>|",
431
+ "lstrip": false,
432
+ "normalized": false,
433
+ "rstrip": false,
434
+ "single_word": false,
435
+ "special": true
436
+ },
437
+ "151697": {
438
+ "content": "|<EXTRA_TOKENS_28>|",
439
+ "lstrip": false,
440
+ "normalized": false,
441
+ "rstrip": false,
442
+ "single_word": false,
443
+ "special": true
444
+ },
445
+ "151698": {
446
+ "content": "|<EXTRA_TOKENS_29>|",
447
+ "lstrip": false,
448
+ "normalized": false,
449
+ "rstrip": false,
450
+ "single_word": false,
451
+ "special": true
452
+ },
453
+ "151699": {
454
+ "content": "|<EXTRA_TOKENS_30>|",
455
+ "lstrip": false,
456
+ "normalized": false,
457
+ "rstrip": false,
458
+ "single_word": false,
459
+ "special": true
460
+ },
461
+ "151700": {
462
+ "content": "|<EXTRA_TOKENS_31>|",
463
+ "lstrip": false,
464
+ "normalized": false,
465
+ "rstrip": false,
466
+ "single_word": false,
467
+ "special": true
468
+ },
469
+ "151701": {
470
+ "content": "|<EXTRA_TOKENS_32>|",
471
+ "lstrip": false,
472
+ "normalized": false,
473
+ "rstrip": false,
474
+ "single_word": false,
475
+ "special": true
476
+ },
477
+ "151702": {
478
+ "content": "|<EXTRA_TOKENS_33>|",
479
+ "lstrip": false,
480
+ "normalized": false,
481
+ "rstrip": false,
482
+ "single_word": false,
483
+ "special": true
484
+ },
485
+ "151703": {
486
+ "content": "|<EXTRA_TOKENS_34>|",
487
+ "lstrip": false,
488
+ "normalized": false,
489
+ "rstrip": false,
490
+ "single_word": false,
491
+ "special": true
492
+ },
493
+ "151704": {
494
+ "content": "|<EXTRA_TOKENS_35>|",
495
+ "lstrip": false,
496
+ "normalized": false,
497
+ "rstrip": false,
498
+ "single_word": false,
499
+ "special": true
500
+ },
501
+ "151705": {
502
+ "content": "|<EXTRA_TOKENS_36>|",
503
+ "lstrip": false,
504
+ "normalized": false,
505
+ "rstrip": false,
506
+ "single_word": false,
507
+ "special": true
508
+ },
509
+ "151706": {
510
+ "content": "|<EXTRA_TOKENS_37>|",
511
+ "lstrip": false,
512
+ "normalized": false,
513
+ "rstrip": false,
514
+ "single_word": false,
515
+ "special": true
516
+ },
517
+ "151707": {
518
+ "content": "|<EXTRA_TOKENS_38>|",
519
+ "lstrip": false,
520
+ "normalized": false,
521
+ "rstrip": false,
522
+ "single_word": false,
523
+ "special": true
524
+ },
525
+ "151708": {
526
+ "content": "|<EXTRA_TOKENS_39>|",
527
+ "lstrip": false,
528
+ "normalized": false,
529
+ "rstrip": false,
530
+ "single_word": false,
531
+ "special": true
532
+ },
533
+ "151709": {
534
+ "content": "|<EXTRA_TOKENS_40>|",
535
+ "lstrip": false,
536
+ "normalized": false,
537
+ "rstrip": false,
538
+ "single_word": false,
539
+ "special": true
540
+ },
541
+ "151710": {
542
+ "content": "|<EXTRA_TOKENS_41>|",
543
+ "lstrip": false,
544
+ "normalized": false,
545
+ "rstrip": false,
546
+ "single_word": false,
547
+ "special": true
548
+ },
549
+ "151711": {
550
+ "content": "|<EXTRA_TOKENS_42>|",
551
+ "lstrip": false,
552
+ "normalized": false,
553
+ "rstrip": false,
554
+ "single_word": false,
555
+ "special": true
556
+ },
557
+ "151712": {
558
+ "content": "|<EXTRA_TOKENS_43>|",
559
+ "lstrip": false,
560
+ "normalized": false,
561
+ "rstrip": false,
562
+ "single_word": false,
563
+ "special": true
564
+ },
565
+ "151713": {
566
+ "content": "|<EXTRA_TOKENS_44>|",
567
+ "lstrip": false,
568
+ "normalized": false,
569
+ "rstrip": false,
570
+ "single_word": false,
571
+ "special": true
572
+ },
573
+ "151714": {
574
+ "content": "|<EXTRA_TOKENS_45>|",
575
+ "lstrip": false,
576
+ "normalized": false,
577
+ "rstrip": false,
578
+ "single_word": false,
579
+ "special": true
580
+ },
581
+ "151715": {
582
+ "content": "|<EXTRA_TOKENS_46>|",
583
+ "lstrip": false,
584
+ "normalized": false,
585
+ "rstrip": false,
586
+ "single_word": false,
587
+ "special": true
588
+ },
589
+ "151716": {
590
+ "content": "|<EXTRA_TOKENS_47>|",
591
+ "lstrip": false,
592
+ "normalized": false,
593
+ "rstrip": false,
594
+ "single_word": false,
595
+ "special": true
596
+ },
597
+ "151717": {
598
+ "content": "|<EXTRA_TOKENS_48>|",
599
+ "lstrip": false,
600
+ "normalized": false,
601
+ "rstrip": false,
602
+ "single_word": false,
603
+ "special": true
604
+ },
605
+ "151718": {
606
+ "content": "|<EXTRA_TOKENS_49>|",
607
+ "lstrip": false,
608
+ "normalized": false,
609
+ "rstrip": false,
610
+ "single_word": false,
611
+ "special": true
612
+ },
613
+ "151719": {
614
+ "content": "|<EXTRA_TOKENS_50>|",
615
+ "lstrip": false,
616
+ "normalized": false,
617
+ "rstrip": false,
618
+ "single_word": false,
619
+ "special": true
620
+ },
621
+ "151720": {
622
+ "content": "|<EXTRA_TOKENS_51>|",
623
+ "lstrip": false,
624
+ "normalized": false,
625
+ "rstrip": false,
626
+ "single_word": false,
627
+ "special": true
628
+ },
629
+ "151721": {
630
+ "content": "|<EXTRA_TOKENS_52>|",
631
+ "lstrip": false,
632
+ "normalized": false,
633
+ "rstrip": false,
634
+ "single_word": false,
635
+ "special": true
636
+ },
637
+ "151722": {
638
+ "content": "|<EXTRA_TOKENS_53>|",
639
+ "lstrip": false,
640
+ "normalized": false,
641
+ "rstrip": false,
642
+ "single_word": false,
643
+ "special": true
644
+ },
645
+ "151723": {
646
+ "content": "|<EXTRA_TOKENS_54>|",
647
+ "lstrip": false,
648
+ "normalized": false,
649
+ "rstrip": false,
650
+ "single_word": false,
651
+ "special": true
652
+ },
653
+ "151724": {
654
+ "content": "|<EXTRA_TOKENS_55>|",
655
+ "lstrip": false,
656
+ "normalized": false,
657
+ "rstrip": false,
658
+ "single_word": false,
659
+ "special": true
660
+ },
661
+ "151725": {
662
+ "content": "|<EXTRA_TOKENS_56>|",
663
+ "lstrip": false,
664
+ "normalized": false,
665
+ "rstrip": false,
666
+ "single_word": false,
667
+ "special": true
668
+ },
669
+ "151726": {
670
+ "content": "|<EXTRA_TOKENS_57>|",
671
+ "lstrip": false,
672
+ "normalized": false,
673
+ "rstrip": false,
674
+ "single_word": false,
675
+ "special": true
676
+ },
677
+ "151727": {
678
+ "content": "|<EXTRA_TOKENS_58>|",
679
+ "lstrip": false,
680
+ "normalized": false,
681
+ "rstrip": false,
682
+ "single_word": false,
683
+ "special": true
684
+ },
685
+ "151728": {
686
+ "content": "|<EXTRA_TOKENS_59>|",
687
+ "lstrip": false,
688
+ "normalized": false,
689
+ "rstrip": false,
690
+ "single_word": false,
691
+ "special": true
692
+ },
693
+ "151729": {
694
+ "content": "|<EXTRA_TOKENS_60>|",
695
+ "lstrip": false,
696
+ "normalized": false,
697
+ "rstrip": false,
698
+ "single_word": false,
699
+ "special": true
700
+ },
701
+ "151730": {
702
+ "content": "|<EXTRA_TOKENS_61>|",
703
+ "lstrip": false,
704
+ "normalized": false,
705
+ "rstrip": false,
706
+ "single_word": false,
707
+ "special": true
708
+ },
709
+ "151731": {
710
+ "content": "|<EXTRA_TOKENS_62>|",
711
+ "lstrip": false,
712
+ "normalized": false,
713
+ "rstrip": false,
714
+ "single_word": false,
715
+ "special": true
716
+ },
717
+ "151732": {
718
+ "content": "|<EXTRA_TOKENS_63>|",
719
+ "lstrip": false,
720
+ "normalized": false,
721
+ "rstrip": false,
722
+ "single_word": false,
723
+ "special": true
724
+ },
725
+ "151733": {
726
+ "content": "|<EXTRA_TOKENS_64>|",
727
+ "lstrip": false,
728
+ "normalized": false,
729
+ "rstrip": false,
730
+ "single_word": false,
731
+ "special": true
732
+ },
733
+ "151734": {
734
+ "content": "|<EXTRA_TOKENS_65>|",
735
+ "lstrip": false,
736
+ "normalized": false,
737
+ "rstrip": false,
738
+ "single_word": false,
739
+ "special": true
740
+ },
741
+ "151735": {
742
+ "content": "|<EXTRA_TOKENS_66>|",
743
+ "lstrip": false,
744
+ "normalized": false,
745
+ "rstrip": false,
746
+ "single_word": false,
747
+ "special": true
748
+ },
749
+ "151736": {
750
+ "content": "|<EXTRA_TOKENS_67>|",
751
+ "lstrip": false,
752
+ "normalized": false,
753
+ "rstrip": false,
754
+ "single_word": false,
755
+ "special": true
756
+ },
757
+ "151737": {
758
+ "content": "|<EXTRA_TOKENS_68>|",
759
+ "lstrip": false,
760
+ "normalized": false,
761
+ "rstrip": false,
762
+ "single_word": false,
763
+ "special": true
764
+ },
765
+ "151738": {
766
+ "content": "|<EXTRA_TOKENS_69>|",
767
+ "lstrip": false,
768
+ "normalized": false,
769
+ "rstrip": false,
770
+ "single_word": false,
771
+ "special": true
772
+ },
773
+ "151739": {
774
+ "content": "|<EXTRA_TOKENS_70>|",
775
+ "lstrip": false,
776
+ "normalized": false,
777
+ "rstrip": false,
778
+ "single_word": false,
779
+ "special": true
780
+ },
781
+ "151740": {
782
+ "content": "|<EXTRA_TOKENS_71>|",
783
+ "lstrip": false,
784
+ "normalized": false,
785
+ "rstrip": false,
786
+ "single_word": false,
787
+ "special": true
788
+ },
789
+ "151741": {
790
+ "content": "|<EXTRA_TOKENS_72>|",
791
+ "lstrip": false,
792
+ "normalized": false,
793
+ "rstrip": false,
794
+ "single_word": false,
795
+ "special": true
796
+ },
797
+ "151742": {
798
+ "content": "|<EXTRA_TOKENS_73>|",
799
+ "lstrip": false,
800
+ "normalized": false,
801
+ "rstrip": false,
802
+ "single_word": false,
803
+ "special": true
804
+ },
805
+ "151743": {
806
+ "content": "|<EXTRA_TOKENS_74>|",
807
+ "lstrip": false,
808
+ "normalized": false,
809
+ "rstrip": false,
810
+ "single_word": false,
811
+ "special": true
812
+ },
813
+ "151744": {
814
+ "content": "|<EXTRA_TOKENS_75>|",
815
+ "lstrip": false,
816
+ "normalized": false,
817
+ "rstrip": false,
818
+ "single_word": false,
819
+ "special": true
820
+ },
821
+ "151745": {
822
+ "content": "|<EXTRA_TOKENS_76>|",
823
+ "lstrip": false,
824
+ "normalized": false,
825
+ "rstrip": false,
826
+ "single_word": false,
827
+ "special": true
828
+ },
829
+ "151746": {
830
+ "content": "|<EXTRA_TOKENS_77>|",
831
+ "lstrip": false,
832
+ "normalized": false,
833
+ "rstrip": false,
834
+ "single_word": false,
835
+ "special": true
836
+ },
837
+ "151747": {
838
+ "content": "|<EXTRA_TOKENS_78>|",
839
+ "lstrip": false,
840
+ "normalized": false,
841
+ "rstrip": false,
842
+ "single_word": false,
843
+ "special": true
844
+ },
845
+ "151748": {
846
+ "content": "|<EXTRA_TOKENS_79>|",
847
+ "lstrip": false,
848
+ "normalized": false,
849
+ "rstrip": false,
850
+ "single_word": false,
851
+ "special": true
852
+ },
853
+ "151749": {
854
+ "content": "|<EXTRA_TOKENS_80>|",
855
+ "lstrip": false,
856
+ "normalized": false,
857
+ "rstrip": false,
858
+ "single_word": false,
859
+ "special": true
860
+ },
861
+ "151750": {
862
+ "content": "|<EXTRA_TOKENS_81>|",
863
+ "lstrip": false,
864
+ "normalized": false,
865
+ "rstrip": false,
866
+ "single_word": false,
867
+ "special": true
868
+ },
869
+ "151751": {
870
+ "content": "|<EXTRA_TOKENS_82>|",
871
+ "lstrip": false,
872
+ "normalized": false,
873
+ "rstrip": false,
874
+ "single_word": false,
875
+ "special": true
876
+ },
877
+ "151752": {
878
+ "content": "|<EXTRA_TOKENS_83>|",
879
+ "lstrip": false,
880
+ "normalized": false,
881
+ "rstrip": false,
882
+ "single_word": false,
883
+ "special": true
884
+ },
885
+ "151753": {
886
+ "content": "|<EXTRA_TOKENS_84>|",
887
+ "lstrip": false,
888
+ "normalized": false,
889
+ "rstrip": false,
890
+ "single_word": false,
891
+ "special": true
892
+ },
893
+ "151754": {
894
+ "content": "|<EXTRA_TOKENS_85>|",
895
+ "lstrip": false,
896
+ "normalized": false,
897
+ "rstrip": false,
898
+ "single_word": false,
899
+ "special": true
900
+ },
901
+ "151755": {
902
+ "content": "|<EXTRA_TOKENS_86>|",
903
+ "lstrip": false,
904
+ "normalized": false,
905
+ "rstrip": false,
906
+ "single_word": false,
907
+ "special": true
908
+ },
909
+ "151756": {
910
+ "content": "|<EXTRA_TOKENS_87>|",
911
+ "lstrip": false,
912
+ "normalized": false,
913
+ "rstrip": false,
914
+ "single_word": false,
915
+ "special": true
916
+ },
917
+ "151757": {
918
+ "content": "|<EXTRA_TOKENS_88>|",
919
+ "lstrip": false,
920
+ "normalized": false,
921
+ "rstrip": false,
922
+ "single_word": false,
923
+ "special": true
924
+ },
925
+ "151758": {
926
+ "content": "|<EXTRA_TOKENS_89>|",
927
+ "lstrip": false,
928
+ "normalized": false,
929
+ "rstrip": false,
930
+ "single_word": false,
931
+ "special": true
932
+ },
933
+ "151759": {
934
+ "content": "|<EXTRA_TOKENS_90>|",
935
+ "lstrip": false,
936
+ "normalized": false,
937
+ "rstrip": false,
938
+ "single_word": false,
939
+ "special": true
940
+ },
941
+ "151760": {
942
+ "content": "|<EXTRA_TOKENS_91>|",
943
+ "lstrip": false,
944
+ "normalized": false,
945
+ "rstrip": false,
946
+ "single_word": false,
947
+ "special": true
948
+ },
949
+ "151761": {
950
+ "content": "|<EXTRA_TOKENS_92>|",
951
+ "lstrip": false,
952
+ "normalized": false,
953
+ "rstrip": false,
954
+ "single_word": false,
955
+ "special": true
956
+ },
957
+ "151762": {
958
+ "content": "|<EXTRA_TOKENS_93>|",
959
+ "lstrip": false,
960
+ "normalized": false,
961
+ "rstrip": false,
962
+ "single_word": false,
963
+ "special": true
964
+ },
965
+ "151763": {
966
+ "content": "|<EXTRA_TOKENS_94>|",
967
+ "lstrip": false,
968
+ "normalized": false,
969
+ "rstrip": false,
970
+ "single_word": false,
971
+ "special": true
972
+ },
973
+ "151764": {
974
+ "content": "|<EXTRA_TOKENS_95>|",
975
+ "lstrip": false,
976
+ "normalized": false,
977
+ "rstrip": false,
978
+ "single_word": false,
979
+ "special": true
980
+ },
981
+ "151765": {
982
+ "content": "|<EXTRA_TOKENS_96>|",
983
+ "lstrip": false,
984
+ "normalized": false,
985
+ "rstrip": false,
986
+ "single_word": false,
987
+ "special": true
988
+ },
989
+ "151766": {
990
+ "content": "|<EXTRA_TOKENS_97>|",
991
+ "lstrip": false,
992
+ "normalized": false,
993
+ "rstrip": false,
994
+ "single_word": false,
995
+ "special": true
996
+ },
997
+ "151767": {
998
+ "content": "|<EXTRA_TOKENS_98>|",
999
+ "lstrip": false,
1000
+ "normalized": false,
1001
+ "rstrip": false,
1002
+ "single_word": false,
1003
+ "special": true
1004
+ },
1005
+ "151768": {
1006
+ "content": "|<EXTRA_TOKENS_99>|",
1007
+ "lstrip": false,
1008
+ "normalized": false,
1009
+ "rstrip": false,
1010
+ "single_word": false,
1011
+ "special": true
1012
+ },
1013
+ "151769": {
1014
+ "content": "|<EXTRA_TOKENS_100>|",
1015
+ "lstrip": false,
1016
+ "normalized": false,
1017
+ "rstrip": false,
1018
+ "single_word": false,
1019
+ "special": true
1020
+ },
1021
+ "151770": {
1022
+ "content": "|<EXTRA_TOKENS_101>|",
1023
+ "lstrip": false,
1024
+ "normalized": false,
1025
+ "rstrip": false,
1026
+ "single_word": false,
1027
+ "special": true
1028
+ },
1029
+ "151771": {
1030
+ "content": "|<EXTRA_TOKENS_102>|",
1031
+ "lstrip": false,
1032
+ "normalized": false,
1033
+ "rstrip": false,
1034
+ "single_word": false,
1035
+ "special": true
1036
+ },
1037
+ "151772": {
1038
+ "content": "|<EXTRA_TOKENS_103>|",
1039
+ "lstrip": false,
1040
+ "normalized": false,
1041
+ "rstrip": false,
1042
+ "single_word": false,
1043
+ "special": true
1044
+ },
1045
+ "151773": {
1046
+ "content": "|<EXTRA_TOKENS_104>|",
1047
+ "lstrip": false,
1048
+ "normalized": false,
1049
+ "rstrip": false,
1050
+ "single_word": false,
1051
+ "special": true
1052
+ },
1053
+ "151774": {
1054
+ "content": "|<EXTRA_TOKENS_105>|",
1055
+ "lstrip": false,
1056
+ "normalized": false,
1057
+ "rstrip": false,
1058
+ "single_word": false,
1059
+ "special": true
1060
+ },
1061
+ "151775": {
1062
+ "content": "|<EXTRA_TOKENS_106>|",
1063
+ "lstrip": false,
1064
+ "normalized": false,
1065
+ "rstrip": false,
1066
+ "single_word": false,
1067
+ "special": true
1068
+ },
1069
+ "151776": {
1070
+ "content": "|<EXTRA_TOKENS_107>|",
1071
+ "lstrip": false,
1072
+ "normalized": false,
1073
+ "rstrip": false,
1074
+ "single_word": false,
1075
+ "special": true
1076
+ },
1077
+ "151777": {
1078
+ "content": "|<EXTRA_TOKENS_108>|",
1079
+ "lstrip": false,
1080
+ "normalized": false,
1081
+ "rstrip": false,
1082
+ "single_word": false,
1083
+ "special": true
1084
+ },
1085
+ "151778": {
1086
+ "content": "|<EXTRA_TOKENS_109>|",
1087
+ "lstrip": false,
1088
+ "normalized": false,
1089
+ "rstrip": false,
1090
+ "single_word": false,
1091
+ "special": true
1092
+ },
1093
+ "151779": {
1094
+ "content": "|<EXTRA_TOKENS_110>|",
1095
+ "lstrip": false,
1096
+ "normalized": false,
1097
+ "rstrip": false,
1098
+ "single_word": false,
1099
+ "special": true
1100
+ },
1101
+ "151780": {
1102
+ "content": "|<EXTRA_TOKENS_111>|",
1103
+ "lstrip": false,
1104
+ "normalized": false,
1105
+ "rstrip": false,
1106
+ "single_word": false,
1107
+ "special": true
1108
+ },
1109
+ "151781": {
1110
+ "content": "|<EXTRA_TOKENS_112>|",
1111
+ "lstrip": false,
1112
+ "normalized": false,
1113
+ "rstrip": false,
1114
+ "single_word": false,
1115
+ "special": true
1116
+ },
1117
+ "151782": {
1118
+ "content": "|<EXTRA_TOKENS_113>|",
1119
+ "lstrip": false,
1120
+ "normalized": false,
1121
+ "rstrip": false,
1122
+ "single_word": false,
1123
+ "special": true
1124
+ },
1125
+ "151783": {
1126
+ "content": "|<EXTRA_TOKENS_114>|",
1127
+ "lstrip": false,
1128
+ "normalized": false,
1129
+ "rstrip": false,
1130
+ "single_word": false,
1131
+ "special": true
1132
+ },
1133
+ "151784": {
1134
+ "content": "|<EXTRA_TOKENS_115>|",
1135
+ "lstrip": false,
1136
+ "normalized": false,
1137
+ "rstrip": false,
1138
+ "single_word": false,
1139
+ "special": true
1140
+ },
1141
+ "151785": {
1142
+ "content": "|<EXTRA_TOKENS_116>|",
1143
+ "lstrip": false,
1144
+ "normalized": false,
1145
+ "rstrip": false,
1146
+ "single_word": false,
1147
+ "special": true
1148
+ },
1149
+ "151786": {
1150
+ "content": "|<EXTRA_TOKENS_117>|",
1151
+ "lstrip": false,
1152
+ "normalized": false,
1153
+ "rstrip": false,
1154
+ "single_word": false,
1155
+ "special": true
1156
+ },
1157
+ "151787": {
1158
+ "content": "|<EXTRA_TOKENS_118>|",
1159
+ "lstrip": false,
1160
+ "normalized": false,
1161
+ "rstrip": false,
1162
+ "single_word": false,
1163
+ "special": true
1164
+ },
1165
+ "151788": {
1166
+ "content": "|<EXTRA_TOKENS_119>|",
1167
+ "lstrip": false,
1168
+ "normalized": false,
1169
+ "rstrip": false,
1170
+ "single_word": false,
1171
+ "special": true
1172
+ },
1173
+ "151789": {
1174
+ "content": "|<EXTRA_TOKENS_120>|",
1175
+ "lstrip": false,
1176
+ "normalized": false,
1177
+ "rstrip": false,
1178
+ "single_word": false,
1179
+ "special": true
1180
+ },
1181
+ "151790": {
1182
+ "content": "|<EXTRA_TOKENS_121>|",
1183
+ "lstrip": false,
1184
+ "normalized": false,
1185
+ "rstrip": false,
1186
+ "single_word": false,
1187
+ "special": true
1188
+ },
1189
+ "151791": {
1190
+ "content": "|<EXTRA_TOKENS_122>|",
1191
+ "lstrip": false,
1192
+ "normalized": false,
1193
+ "rstrip": false,
1194
+ "single_word": false,
1195
+ "special": true
1196
+ },
1197
+ "151792": {
1198
+ "content": "|<EXTRA_TOKENS_123>|",
1199
+ "lstrip": false,
1200
+ "normalized": false,
1201
+ "rstrip": false,
1202
+ "single_word": false,
1203
+ "special": true
1204
+ },
1205
+ "151793": {
1206
+ "content": "|<EXTRA_TOKENS_124>|",
1207
+ "lstrip": false,
1208
+ "normalized": false,
1209
+ "rstrip": false,
1210
+ "single_word": false,
1211
+ "special": true
1212
+ },
1213
+ "151794": {
1214
+ "content": "|<EXTRA_TOKENS_125>|",
1215
+ "lstrip": false,
1216
+ "normalized": false,
1217
+ "rstrip": false,
1218
+ "single_word": false,
1219
+ "special": true
1220
+ },
1221
+ "151795": {
1222
+ "content": "|<EXTRA_TOKENS_126>|",
1223
+ "lstrip": false,
1224
+ "normalized": false,
1225
+ "rstrip": false,
1226
+ "single_word": false,
1227
+ "special": true
1228
+ },
1229
+ "151796": {
1230
+ "content": "|<EXTRA_TOKENS_127>|",
1231
+ "lstrip": false,
1232
+ "normalized": false,
1233
+ "rstrip": false,
1234
+ "single_word": false,
1235
+ "special": true
1236
+ },
1237
+ "151797": {
1238
+ "content": "|<EXTRA_TOKENS_128>|",
1239
+ "lstrip": false,
1240
+ "normalized": false,
1241
+ "rstrip": false,
1242
+ "single_word": false,
1243
+ "special": true
1244
+ },
1245
+ "151798": {
1246
+ "content": "|<EXTRA_TOKENS_129>|",
1247
+ "lstrip": false,
1248
+ "normalized": false,
1249
+ "rstrip": false,
1250
+ "single_word": false,
1251
+ "special": true
1252
+ },
1253
+ "151799": {
1254
+ "content": "|<EXTRA_TOKENS_130>|",
1255
+ "lstrip": false,
1256
+ "normalized": false,
1257
+ "rstrip": false,
1258
+ "single_word": false,
1259
+ "special": true
1260
+ },
1261
+ "151800": {
1262
+ "content": "|<EXTRA_TOKENS_131>|",
1263
+ "lstrip": false,
1264
+ "normalized": false,
1265
+ "rstrip": false,
1266
+ "single_word": false,
1267
+ "special": true
1268
+ },
1269
+ "151801": {
1270
+ "content": "|<EXTRA_TOKENS_132>|",
1271
+ "lstrip": false,
1272
+ "normalized": false,
1273
+ "rstrip": false,
1274
+ "single_word": false,
1275
+ "special": true
1276
+ },
1277
+ "151802": {
1278
+ "content": "|<EXTRA_TOKENS_133>|",
1279
+ "lstrip": false,
1280
+ "normalized": false,
1281
+ "rstrip": false,
1282
+ "single_word": false,
1283
+ "special": true
1284
+ },
1285
+ "151803": {
1286
+ "content": "|<EXTRA_TOKENS_134>|",
1287
+ "lstrip": false,
1288
+ "normalized": false,
1289
+ "rstrip": false,
1290
+ "single_word": false,
1291
+ "special": true
1292
+ },
1293
+ "151804": {
1294
+ "content": "|<EXTRA_TOKENS_135>|",
1295
+ "lstrip": false,
1296
+ "normalized": false,
1297
+ "rstrip": false,
1298
+ "single_word": false,
1299
+ "special": true
1300
+ },
1301
+ "151805": {
1302
+ "content": "|<EXTRA_TOKENS_136>|",
1303
+ "lstrip": false,
1304
+ "normalized": false,
1305
+ "rstrip": false,
1306
+ "single_word": false,
1307
+ "special": true
1308
+ },
1309
+ "151806": {
1310
+ "content": "|<EXTRA_TOKENS_137>|",
1311
+ "lstrip": false,
1312
+ "normalized": false,
1313
+ "rstrip": false,
1314
+ "single_word": false,
1315
+ "special": true
1316
+ },
1317
+ "151807": {
1318
+ "content": "|<EXTRA_TOKENS_138>|",
1319
+ "lstrip": false,
1320
+ "normalized": false,
1321
+ "rstrip": false,
1322
+ "single_word": false,
1323
+ "special": true
1324
+ },
1325
+ "151808": {
1326
+ "content": "|<EXTRA_TOKENS_139>|",
1327
+ "lstrip": false,
1328
+ "normalized": false,
1329
+ "rstrip": false,
1330
+ "single_word": false,
1331
+ "special": true
1332
+ },
1333
+ "151809": {
1334
+ "content": "|<EXTRA_TOKENS_140>|",
1335
+ "lstrip": false,
1336
+ "normalized": false,
1337
+ "rstrip": false,
1338
+ "single_word": false,
1339
+ "special": true
1340
+ },
1341
+ "151810": {
1342
+ "content": "|<EXTRA_TOKENS_141>|",
1343
+ "lstrip": false,
1344
+ "normalized": false,
1345
+ "rstrip": false,
1346
+ "single_word": false,
1347
+ "special": true
1348
+ },
1349
+ "151811": {
1350
+ "content": "|<EXTRA_TOKENS_142>|",
1351
+ "lstrip": false,
1352
+ "normalized": false,
1353
+ "rstrip": false,
1354
+ "single_word": false,
1355
+ "special": true
1356
+ },
1357
+ "151812": {
1358
+ "content": "|<EXTRA_TOKENS_143>|",
1359
+ "lstrip": false,
1360
+ "normalized": false,
1361
+ "rstrip": false,
1362
+ "single_word": false,
1363
+ "special": true
1364
+ },
1365
+ "151813": {
1366
+ "content": "|<EXTRA_TOKENS_144>|",
1367
+ "lstrip": false,
1368
+ "normalized": false,
1369
+ "rstrip": false,
1370
+ "single_word": false,
1371
+ "special": true
1372
+ },
1373
+ "151814": {
1374
+ "content": "|<EXTRA_TOKENS_145>|",
1375
+ "lstrip": false,
1376
+ "normalized": false,
1377
+ "rstrip": false,
1378
+ "single_word": false,
1379
+ "special": true
1380
+ },
1381
+ "151815": {
1382
+ "content": "|<EXTRA_TOKENS_146>|",
1383
+ "lstrip": false,
1384
+ "normalized": false,
1385
+ "rstrip": false,
1386
+ "single_word": false,
1387
+ "special": true
1388
+ },
1389
+ "151816": {
1390
+ "content": "|<EXTRA_TOKENS_147>|",
1391
+ "lstrip": false,
1392
+ "normalized": false,
1393
+ "rstrip": false,
1394
+ "single_word": false,
1395
+ "special": true
1396
+ },
1397
+ "151817": {
1398
+ "content": "|<EXTRA_TOKENS_148>|",
1399
+ "lstrip": false,
1400
+ "normalized": false,
1401
+ "rstrip": false,
1402
+ "single_word": false,
1403
+ "special": true
1404
+ },
1405
+ "151818": {
1406
+ "content": "|<EXTRA_TOKENS_149>|",
1407
+ "lstrip": false,
1408
+ "normalized": false,
1409
+ "rstrip": false,
1410
+ "single_word": false,
1411
+ "special": true
1412
+ },
1413
+ "151819": {
1414
+ "content": "|<EXTRA_TOKENS_150>|",
1415
+ "lstrip": false,
1416
+ "normalized": false,
1417
+ "rstrip": false,
1418
+ "single_word": false,
1419
+ "special": true
1420
+ },
1421
+ "151820": {
1422
+ "content": "|<EXTRA_TOKENS_151>|",
1423
+ "lstrip": false,
1424
+ "normalized": false,
1425
+ "rstrip": false,
1426
+ "single_word": false,
1427
+ "special": true
1428
+ },
1429
+ "151821": {
1430
+ "content": "|<EXTRA_TOKENS_152>|",
1431
+ "lstrip": false,
1432
+ "normalized": false,
1433
+ "rstrip": false,
1434
+ "single_word": false,
1435
+ "special": true
1436
+ },
1437
+ "151822": {
1438
+ "content": "|<EXTRA_TOKENS_153>|",
1439
+ "lstrip": false,
1440
+ "normalized": false,
1441
+ "rstrip": false,
1442
+ "single_word": false,
1443
+ "special": true
1444
+ },
1445
+ "151823": {
1446
+ "content": "|<EXTRA_TOKENS_154>|",
1447
+ "lstrip": false,
1448
+ "normalized": false,
1449
+ "rstrip": false,
1450
+ "single_word": false,
1451
+ "special": true
1452
+ },
1453
+ "151824": {
1454
+ "content": "|<EXTRA_TOKENS_155>|",
1455
+ "lstrip": false,
1456
+ "normalized": false,
1457
+ "rstrip": false,
1458
+ "single_word": false,
1459
+ "special": true
1460
+ },
1461
+ "151825": {
1462
+ "content": "|<EXTRA_TOKENS_156>|",
1463
+ "lstrip": false,
1464
+ "normalized": false,
1465
+ "rstrip": false,
1466
+ "single_word": false,
1467
+ "special": true
1468
+ },
1469
+ "151826": {
1470
+ "content": "|<EXTRA_TOKENS_157>|",
1471
+ "lstrip": false,
1472
+ "normalized": false,
1473
+ "rstrip": false,
1474
+ "single_word": false,
1475
+ "special": true
1476
+ },
1477
+ "151827": {
1478
+ "content": "|<EXTRA_TOKENS_158>|",
1479
+ "lstrip": false,
1480
+ "normalized": false,
1481
+ "rstrip": false,
1482
+ "single_word": false,
1483
+ "special": true
1484
+ },
1485
+ "151828": {
1486
+ "content": "|<EXTRA_TOKENS_159>|",
1487
+ "lstrip": false,
1488
+ "normalized": false,
1489
+ "rstrip": false,
1490
+ "single_word": false,
1491
+ "special": true
1492
+ },
1493
+ "151829": {
1494
+ "content": "|<EXTRA_TOKENS_160>|",
1495
+ "lstrip": false,
1496
+ "normalized": false,
1497
+ "rstrip": false,
1498
+ "single_word": false,
1499
+ "special": true
1500
+ },
1501
+ "151830": {
1502
+ "content": "|<EXTRA_TOKENS_161>|",
1503
+ "lstrip": false,
1504
+ "normalized": false,
1505
+ "rstrip": false,
1506
+ "single_word": false,
1507
+ "special": true
1508
+ },
1509
+ "151831": {
1510
+ "content": "|<EXTRA_TOKENS_162>|",
1511
+ "lstrip": false,
1512
+ "normalized": false,
1513
+ "rstrip": false,
1514
+ "single_word": false,
1515
+ "special": true
1516
+ },
1517
+ "151832": {
1518
+ "content": "|<EXTRA_TOKENS_163>|",
1519
+ "lstrip": false,
1520
+ "normalized": false,
1521
+ "rstrip": false,
1522
+ "single_word": false,
1523
+ "special": true
1524
+ },
1525
+ "151833": {
1526
+ "content": "|<EXTRA_TOKENS_164>|",
1527
+ "lstrip": false,
1528
+ "normalized": false,
1529
+ "rstrip": false,
1530
+ "single_word": false,
1531
+ "special": true
1532
+ },
1533
+ "151834": {
1534
+ "content": "|<EXTRA_TOKENS_165>|",
1535
+ "lstrip": false,
1536
+ "normalized": false,
1537
+ "rstrip": false,
1538
+ "single_word": false,
1539
+ "special": true
1540
+ },
1541
+ "151835": {
1542
+ "content": "|<EXTRA_TOKENS_166>|",
1543
+ "lstrip": false,
1544
+ "normalized": false,
1545
+ "rstrip": false,
1546
+ "single_word": false,
1547
+ "special": true
1548
+ },
1549
+ "151836": {
1550
+ "content": "|<EXTRA_TOKENS_167>|",
1551
+ "lstrip": false,
1552
+ "normalized": false,
1553
+ "rstrip": false,
1554
+ "single_word": false,
1555
+ "special": true
1556
+ },
1557
+ "151837": {
1558
+ "content": "|<EXTRA_TOKENS_168>|",
1559
+ "lstrip": false,
1560
+ "normalized": false,
1561
+ "rstrip": false,
1562
+ "single_word": false,
1563
+ "special": true
1564
+ },
1565
+ "151838": {
1566
+ "content": "|<EXTRA_TOKENS_169>|",
1567
+ "lstrip": false,
1568
+ "normalized": false,
1569
+ "rstrip": false,
1570
+ "single_word": false,
1571
+ "special": true
1572
+ },
1573
+ "151839": {
1574
+ "content": "|<EXTRA_TOKENS_170>|",
1575
+ "lstrip": false,
1576
+ "normalized": false,
1577
+ "rstrip": false,
1578
+ "single_word": false,
1579
+ "special": true
1580
+ },
1581
+ "151840": {
1582
+ "content": "|<EXTRA_TOKENS_171>|",
1583
+ "lstrip": false,
1584
+ "normalized": false,
1585
+ "rstrip": false,
1586
+ "single_word": false,
1587
+ "special": true
1588
+ },
1589
+ "151841": {
1590
+ "content": "|<EXTRA_TOKENS_172>|",
1591
+ "lstrip": false,
1592
+ "normalized": false,
1593
+ "rstrip": false,
1594
+ "single_word": false,
1595
+ "special": true
1596
+ },
1597
+ "151842": {
1598
+ "content": "|<EXTRA_TOKENS_173>|",
1599
+ "lstrip": false,
1600
+ "normalized": false,
1601
+ "rstrip": false,
1602
+ "single_word": false,
1603
+ "special": true
1604
+ },
1605
+ "151843": {
1606
+ "content": "|<EXTRA_TOKENS_174>|",
1607
+ "lstrip": false,
1608
+ "normalized": false,
1609
+ "rstrip": false,
1610
+ "single_word": false,
1611
+ "special": true
1612
+ },
1613
+ "151844": {
1614
+ "content": "|<EXTRA_TOKENS_175>|",
1615
+ "lstrip": false,
1616
+ "normalized": false,
1617
+ "rstrip": false,
1618
+ "single_word": false,
1619
+ "special": true
1620
+ },
1621
+ "151845": {
1622
+ "content": "|<EXTRA_TOKENS_176>|",
1623
+ "lstrip": false,
1624
+ "normalized": false,
1625
+ "rstrip": false,
1626
+ "single_word": false,
1627
+ "special": true
1628
+ },
1629
+ "151846": {
1630
+ "content": "|<EXTRA_TOKENS_177>|",
1631
+ "lstrip": false,
1632
+ "normalized": false,
1633
+ "rstrip": false,
1634
+ "single_word": false,
1635
+ "special": true
1636
+ },
1637
+ "151847": {
1638
+ "content": "|<EXTRA_TOKENS_178>|",
1639
+ "lstrip": false,
1640
+ "normalized": false,
1641
+ "rstrip": false,
1642
+ "single_word": false,
1643
+ "special": true
1644
+ },
1645
+ "151848": {
1646
+ "content": "|<EXTRA_TOKENS_179>|",
1647
+ "lstrip": false,
1648
+ "normalized": false,
1649
+ "rstrip": false,
1650
+ "single_word": false,
1651
+ "special": true
1652
+ },
1653
+ "151849": {
1654
+ "content": "|<EXTRA_TOKENS_180>|",
1655
+ "lstrip": false,
1656
+ "normalized": false,
1657
+ "rstrip": false,
1658
+ "single_word": false,
1659
+ "special": true
1660
+ },
1661
+ "151850": {
1662
+ "content": "|<EXTRA_TOKENS_181>|",
1663
+ "lstrip": false,
1664
+ "normalized": false,
1665
+ "rstrip": false,
1666
+ "single_word": false,
1667
+ "special": true
1668
+ },
1669
+ "151851": {
1670
+ "content": "|<EXTRA_TOKENS_182>|",
1671
+ "lstrip": false,
1672
+ "normalized": false,
1673
+ "rstrip": false,
1674
+ "single_word": false,
1675
+ "special": true
1676
+ },
1677
+ "151852": {
1678
+ "content": "|<EXTRA_TOKENS_183>|",
1679
+ "lstrip": false,
1680
+ "normalized": false,
1681
+ "rstrip": false,
1682
+ "single_word": false,
1683
+ "special": true
1684
+ },
1685
+ "151853": {
1686
+ "content": "|<EXTRA_TOKENS_184>|",
1687
+ "lstrip": false,
1688
+ "normalized": false,
1689
+ "rstrip": false,
1690
+ "single_word": false,
1691
+ "special": true
1692
+ },
1693
+ "151854": {
1694
+ "content": "|<EXTRA_TOKENS_185>|",
1695
+ "lstrip": false,
1696
+ "normalized": false,
1697
+ "rstrip": false,
1698
+ "single_word": false,
1699
+ "special": true
1700
+ },
1701
+ "151855": {
1702
+ "content": "|<EXTRA_TOKENS_186>|",
1703
+ "lstrip": false,
1704
+ "normalized": false,
1705
+ "rstrip": false,
1706
+ "single_word": false,
1707
+ "special": true
1708
+ },
1709
+ "151856": {
1710
+ "content": "|<EXTRA_TOKENS_187>|",
1711
+ "lstrip": false,
1712
+ "normalized": false,
1713
+ "rstrip": false,
1714
+ "single_word": false,
1715
+ "special": true
1716
+ },
1717
+ "151857": {
1718
+ "content": "|<EXTRA_TOKENS_188>|",
1719
+ "lstrip": false,
1720
+ "normalized": false,
1721
+ "rstrip": false,
1722
+ "single_word": false,
1723
+ "special": true
1724
+ },
1725
+ "151858": {
1726
+ "content": "|<EXTRA_TOKENS_189>|",
1727
+ "lstrip": false,
1728
+ "normalized": false,
1729
+ "rstrip": false,
1730
+ "single_word": false,
1731
+ "special": true
1732
+ },
1733
+ "151859": {
1734
+ "content": "|<EXTRA_TOKENS_190>|",
1735
+ "lstrip": false,
1736
+ "normalized": false,
1737
+ "rstrip": false,
1738
+ "single_word": false,
1739
+ "special": true
1740
+ },
1741
+ "151860": {
1742
+ "content": "|<EXTRA_TOKENS_191>|",
1743
+ "lstrip": false,
1744
+ "normalized": false,
1745
+ "rstrip": false,
1746
+ "single_word": false,
1747
+ "special": true
1748
+ },
1749
+ "151861": {
1750
+ "content": "|<EXTRA_TOKENS_192>|",
1751
+ "lstrip": false,
1752
+ "normalized": false,
1753
+ "rstrip": false,
1754
+ "single_word": false,
1755
+ "special": true
1756
+ },
1757
+ "151862": {
1758
+ "content": "|<EXTRA_TOKENS_193>|",
1759
+ "lstrip": false,
1760
+ "normalized": false,
1761
+ "rstrip": false,
1762
+ "single_word": false,
1763
+ "special": true
1764
+ },
1765
+ "151863": {
1766
+ "content": "|<EXTRA_TOKENS_194>|",
1767
+ "lstrip": false,
1768
+ "normalized": false,
1769
+ "rstrip": false,
1770
+ "single_word": false,
1771
+ "special": true
1772
+ },
1773
+ "151864": {
1774
+ "content": "|<EXTRA_TOKENS_195>|",
1775
+ "lstrip": false,
1776
+ "normalized": false,
1777
+ "rstrip": false,
1778
+ "single_word": false,
1779
+ "special": true
1780
+ },
1781
+ "151865": {
1782
+ "content": "|<EXTRA_TOKENS_196>|",
1783
+ "lstrip": false,
1784
+ "normalized": false,
1785
+ "rstrip": false,
1786
+ "single_word": false,
1787
+ "special": true
1788
+ },
1789
+ "151866": {
1790
+ "content": "|<EXTRA_TOKENS_197>|",
1791
+ "lstrip": false,
1792
+ "normalized": false,
1793
+ "rstrip": false,
1794
+ "single_word": false,
1795
+ "special": true
1796
+ },
1797
+ "151867": {
1798
+ "content": "|<EXTRA_TOKENS_198>|",
1799
+ "lstrip": false,
1800
+ "normalized": false,
1801
+ "rstrip": false,
1802
+ "single_word": false,
1803
+ "special": true
1804
+ },
1805
+ "151868": {
1806
+ "content": "|<EXTRA_TOKENS_199>|",
1807
+ "lstrip": false,
1808
+ "normalized": false,
1809
+ "rstrip": false,
1810
+ "single_word": false,
1811
+ "special": true
1812
+ },
1813
+ "151869": {
1814
+ "content": "|<EXTRA_TOKENS_200>|",
1815
+ "lstrip": false,
1816
+ "normalized": false,
1817
+ "rstrip": false,
1818
+ "single_word": false,
1819
+ "special": true
1820
+ },
1821
+ "151870": {
1822
+ "content": "|<EXTRA_TOKENS_201>|",
1823
+ "lstrip": false,
1824
+ "normalized": false,
1825
+ "rstrip": false,
1826
+ "single_word": false,
1827
+ "special": true
1828
+ },
1829
+ "151871": {
1830
+ "content": "|<EXTRA_TOKENS_202>|",
1831
+ "lstrip": false,
1832
+ "normalized": false,
1833
+ "rstrip": false,
1834
+ "single_word": false,
1835
+ "special": true
1836
+ },
1837
+ "151872": {
1838
+ "content": "|<EXTRA_TOKENS_203>|",
1839
+ "lstrip": false,
1840
+ "normalized": false,
1841
+ "rstrip": false,
1842
+ "single_word": false,
1843
+ "special": true
1844
+ },
1845
+ "151873": {
1846
+ "content": "|<EXTRA_TOKENS_204>|",
1847
+ "lstrip": false,
1848
+ "normalized": false,
1849
+ "rstrip": false,
1850
+ "single_word": false,
1851
+ "special": true
1852
+ },
1853
+ "151874": {
1854
+ "content": "|<EXTRA_TOKENS_205>|",
1855
+ "lstrip": false,
1856
+ "normalized": false,
1857
+ "rstrip": false,
1858
+ "single_word": false,
1859
+ "special": true
1860
+ },
1861
+ "151875": {
1862
+ "content": "|<EXTRA_TOKENS_206>|",
1863
+ "lstrip": false,
1864
+ "normalized": false,
1865
+ "rstrip": false,
1866
+ "single_word": false,
1867
+ "special": true
1868
+ },
1869
+ "151876": {
1870
+ "content": "|<EXTRA_TOKENS_207>|",
1871
+ "lstrip": false,
1872
+ "normalized": false,
1873
+ "rstrip": false,
1874
+ "single_word": false,
1875
+ "special": true
1876
+ },
1877
+ "151877": {
1878
+ "content": "|<EXTRA_TOKENS_208>|",
1879
+ "lstrip": false,
1880
+ "normalized": false,
1881
+ "rstrip": false,
1882
+ "single_word": false,
1883
+ "special": true
1884
+ },
1885
+ "151878": {
1886
+ "content": "|<EXTRA_TOKENS_209>|",
1887
+ "lstrip": false,
1888
+ "normalized": false,
1889
+ "rstrip": false,
1890
+ "single_word": false,
1891
+ "special": true
1892
+ },
1893
+ "151879": {
1894
+ "content": "|<EXTRA_TOKENS_210>|",
1895
+ "lstrip": false,
1896
+ "normalized": false,
1897
+ "rstrip": false,
1898
+ "single_word": false,
1899
+ "special": true
1900
+ },
1901
+ "151880": {
1902
+ "content": "|<EXTRA_TOKENS_211>|",
1903
+ "lstrip": false,
1904
+ "normalized": false,
1905
+ "rstrip": false,
1906
+ "single_word": false,
1907
+ "special": true
1908
+ },
1909
+ "151881": {
1910
+ "content": "|<EXTRA_TOKENS_212>|",
1911
+ "lstrip": false,
1912
+ "normalized": false,
1913
+ "rstrip": false,
1914
+ "single_word": false,
1915
+ "special": true
1916
+ },
1917
+ "151882": {
1918
+ "content": "|<EXTRA_TOKENS_213>|",
1919
+ "lstrip": false,
1920
+ "normalized": false,
1921
+ "rstrip": false,
1922
+ "single_word": false,
1923
+ "special": true
1924
+ },
1925
+ "151883": {
1926
+ "content": "|<EXTRA_TOKENS_214>|",
1927
+ "lstrip": false,
1928
+ "normalized": false,
1929
+ "rstrip": false,
1930
+ "single_word": false,
1931
+ "special": true
1932
+ },
1933
+ "151884": {
1934
+ "content": "|<EXTRA_TOKENS_215>|",
1935
+ "lstrip": false,
1936
+ "normalized": false,
1937
+ "rstrip": false,
1938
+ "single_word": false,
1939
+ "special": true
1940
+ },
1941
+ "151885": {
1942
+ "content": "|<EXTRA_TOKENS_216>|",
1943
+ "lstrip": false,
1944
+ "normalized": false,
1945
+ "rstrip": false,
1946
+ "single_word": false,
1947
+ "special": true
1948
+ },
1949
+ "151886": {
1950
+ "content": "|<EXTRA_TOKENS_217>|",
1951
+ "lstrip": false,
1952
+ "normalized": false,
1953
+ "rstrip": false,
1954
+ "single_word": false,
1955
+ "special": true
1956
+ },
1957
+ "151887": {
1958
+ "content": "|<EXTRA_TOKENS_218>|",
1959
+ "lstrip": false,
1960
+ "normalized": false,
1961
+ "rstrip": false,
1962
+ "single_word": false,
1963
+ "special": true
1964
+ },
1965
+ "151888": {
1966
+ "content": "|<EXTRA_TOKENS_219>|",
1967
+ "lstrip": false,
1968
+ "normalized": false,
1969
+ "rstrip": false,
1970
+ "single_word": false,
1971
+ "special": true
1972
+ },
1973
+ "151889": {
1974
+ "content": "|<EXTRA_TOKENS_220>|",
1975
+ "lstrip": false,
1976
+ "normalized": false,
1977
+ "rstrip": false,
1978
+ "single_word": false,
1979
+ "special": true
1980
+ },
1981
+ "151890": {
1982
+ "content": "|<EXTRA_TOKENS_221>|",
1983
+ "lstrip": false,
1984
+ "normalized": false,
1985
+ "rstrip": false,
1986
+ "single_word": false,
1987
+ "special": true
1988
+ },
1989
+ "151891": {
1990
+ "content": "|<EXTRA_TOKENS_222>|",
1991
+ "lstrip": false,
1992
+ "normalized": false,
1993
+ "rstrip": false,
1994
+ "single_word": false,
1995
+ "special": true
1996
+ },
1997
+ "151892": {
1998
+ "content": "|<EXTRA_TOKENS_223>|",
1999
+ "lstrip": false,
2000
+ "normalized": false,
2001
+ "rstrip": false,
2002
+ "single_word": false,
2003
+ "special": true
2004
+ },
2005
+ "151893": {
2006
+ "content": "|<EXTRA_TOKENS_224>|",
2007
+ "lstrip": false,
2008
+ "normalized": false,
2009
+ "rstrip": false,
2010
+ "single_word": false,
2011
+ "special": true
2012
+ },
2013
+ "151894": {
2014
+ "content": "|<EXTRA_TOKENS_225>|",
2015
+ "lstrip": false,
2016
+ "normalized": false,
2017
+ "rstrip": false,
2018
+ "single_word": false,
2019
+ "special": true
2020
+ },
2021
+ "151895": {
2022
+ "content": "|<EXTRA_TOKENS_226>|",
2023
+ "lstrip": false,
2024
+ "normalized": false,
2025
+ "rstrip": false,
2026
+ "single_word": false,
2027
+ "special": true
2028
+ },
2029
+ "151896": {
2030
+ "content": "|<EXTRA_TOKENS_227>|",
2031
+ "lstrip": false,
2032
+ "normalized": false,
2033
+ "rstrip": false,
2034
+ "single_word": false,
2035
+ "special": true
2036
+ },
2037
+ "151897": {
2038
+ "content": "|<EXTRA_TOKENS_228>|",
2039
+ "lstrip": false,
2040
+ "normalized": false,
2041
+ "rstrip": false,
2042
+ "single_word": false,
2043
+ "special": true
2044
+ },
2045
+ "151898": {
2046
+ "content": "|<EXTRA_TOKENS_229>|",
2047
+ "lstrip": false,
2048
+ "normalized": false,
2049
+ "rstrip": false,
2050
+ "single_word": false,
2051
+ "special": true
2052
+ },
2053
+ "151899": {
2054
+ "content": "|<EXTRA_TOKENS_230>|",
2055
+ "lstrip": false,
2056
+ "normalized": false,
2057
+ "rstrip": false,
2058
+ "single_word": false,
2059
+ "special": true
2060
+ },
2061
+ "151900": {
2062
+ "content": "|<EXTRA_TOKENS_231>|",
2063
+ "lstrip": false,
2064
+ "normalized": false,
2065
+ "rstrip": false,
2066
+ "single_word": false,
2067
+ "special": true
2068
+ },
2069
+ "151901": {
2070
+ "content": "|<EXTRA_TOKENS_232>|",
2071
+ "lstrip": false,
2072
+ "normalized": false,
2073
+ "rstrip": false,
2074
+ "single_word": false,
2075
+ "special": true
2076
+ },
2077
+ "151902": {
2078
+ "content": "|<EXTRA_TOKENS_233>|",
2079
+ "lstrip": false,
2080
+ "normalized": false,
2081
+ "rstrip": false,
2082
+ "single_word": false,
2083
+ "special": true
2084
+ },
2085
+ "151903": {
2086
+ "content": "|<EXTRA_TOKENS_234>|",
2087
+ "lstrip": false,
2088
+ "normalized": false,
2089
+ "rstrip": false,
2090
+ "single_word": false,
2091
+ "special": true
2092
+ },
2093
+ "151904": {
2094
+ "content": "|<EXTRA_TOKENS_235>|",
2095
+ "lstrip": false,
2096
+ "normalized": false,
2097
+ "rstrip": false,
2098
+ "single_word": false,
2099
+ "special": true
2100
+ },
2101
+ "151905": {
2102
+ "content": "|<EXTRA_TOKENS_236>|",
2103
+ "lstrip": false,
2104
+ "normalized": false,
2105
+ "rstrip": false,
2106
+ "single_word": false,
2107
+ "special": true
2108
+ },
2109
+ "151906": {
2110
+ "content": "|<EXTRA_TOKENS_237>|",
2111
+ "lstrip": false,
2112
+ "normalized": false,
2113
+ "rstrip": false,
2114
+ "single_word": false,
2115
+ "special": true
2116
+ },
2117
+ "151907": {
2118
+ "content": "|<EXTRA_TOKENS_238>|",
2119
+ "lstrip": false,
2120
+ "normalized": false,
2121
+ "rstrip": false,
2122
+ "single_word": false,
2123
+ "special": true
2124
+ },
2125
+ "151908": {
2126
+ "content": "|<EXTRA_TOKENS_239>|",
2127
+ "lstrip": false,
2128
+ "normalized": false,
2129
+ "rstrip": false,
2130
+ "single_word": false,
2131
+ "special": true
2132
+ },
2133
+ "151909": {
2134
+ "content": "|<EXTRA_TOKENS_240>|",
2135
+ "lstrip": false,
2136
+ "normalized": false,
2137
+ "rstrip": false,
2138
+ "single_word": false,
2139
+ "special": true
2140
+ },
2141
+ "151910": {
2142
+ "content": "|<EXTRA_TOKENS_241>|",
2143
+ "lstrip": false,
2144
+ "normalized": false,
2145
+ "rstrip": false,
2146
+ "single_word": false,
2147
+ "special": true
2148
+ },
2149
+ "151911": {
2150
+ "content": "|<EXTRA_TOKENS_242>|",
2151
+ "lstrip": false,
2152
+ "normalized": false,
2153
+ "rstrip": false,
2154
+ "single_word": false,
2155
+ "special": true
2156
+ },
2157
+ "151912": {
2158
+ "content": "|<EXTRA_TOKENS_243>|",
2159
+ "lstrip": false,
2160
+ "normalized": false,
2161
+ "rstrip": false,
2162
+ "single_word": false,
2163
+ "special": true
2164
+ },
2165
+ "151913": {
2166
+ "content": "|<EXTRA_TOKENS_244>|",
2167
+ "lstrip": false,
2168
+ "normalized": false,
2169
+ "rstrip": false,
2170
+ "single_word": false,
2171
+ "special": true
2172
+ },
2173
+ "151914": {
2174
+ "content": "|<EXTRA_TOKENS_245>|",
2175
+ "lstrip": false,
2176
+ "normalized": false,
2177
+ "rstrip": false,
2178
+ "single_word": false,
2179
+ "special": true
2180
+ },
2181
+ "151915": {
2182
+ "content": "|<EXTRA_TOKENS_246>|",
2183
+ "lstrip": false,
2184
+ "normalized": false,
2185
+ "rstrip": false,
2186
+ "single_word": false,
2187
+ "special": true
2188
+ },
2189
+ "151916": {
2190
+ "content": "|<EXTRA_TOKENS_247>|",
2191
+ "lstrip": false,
2192
+ "normalized": false,
2193
+ "rstrip": false,
2194
+ "single_word": false,
2195
+ "special": true
2196
+ },
2197
+ "151917": {
2198
+ "content": "|<EXTRA_TOKENS_248>|",
2199
+ "lstrip": false,
2200
+ "normalized": false,
2201
+ "rstrip": false,
2202
+ "single_word": false,
2203
+ "special": true
2204
+ },
2205
+ "151918": {
2206
+ "content": "|<EXTRA_TOKENS_249>|",
2207
+ "lstrip": false,
2208
+ "normalized": false,
2209
+ "rstrip": false,
2210
+ "single_word": false,
2211
+ "special": true
2212
+ },
2213
+ "151919": {
2214
+ "content": "|<EXTRA_TOKENS_250>|",
2215
+ "lstrip": false,
2216
+ "normalized": false,
2217
+ "rstrip": false,
2218
+ "single_word": false,
2219
+ "special": true
2220
+ },
2221
+ "151920": {
2222
+ "content": "|<EXTRA_TOKENS_251>|",
2223
+ "lstrip": false,
2224
+ "normalized": false,
2225
+ "rstrip": false,
2226
+ "single_word": false,
2227
+ "special": true
2228
+ },
2229
+ "151921": {
2230
+ "content": "|<EXTRA_TOKENS_252>|",
2231
+ "lstrip": false,
2232
+ "normalized": false,
2233
+ "rstrip": false,
2234
+ "single_word": false,
2235
+ "special": true
2236
+ },
2237
+ "151922": {
2238
+ "content": "|<EXTRA_TOKENS_253>|",
2239
+ "lstrip": false,
2240
+ "normalized": false,
2241
+ "rstrip": false,
2242
+ "single_word": false,
2243
+ "special": true
2244
+ },
2245
+ "151923": {
2246
+ "content": "|<EXTRA_TOKENS_254>|",
2247
+ "lstrip": false,
2248
+ "normalized": false,
2249
+ "rstrip": false,
2250
+ "single_word": false,
2251
+ "special": true
2252
+ },
2253
+ "151924": {
2254
+ "content": "|<EXTRA_TOKENS_255>|",
2255
+ "lstrip": false,
2256
+ "normalized": false,
2257
+ "rstrip": false,
2258
+ "single_word": false,
2259
+ "special": true
2260
+ },
2261
+ "151925": {
2262
+ "content": "|<EXTRA_TOKENS_256>|",
2263
+ "lstrip": false,
2264
+ "normalized": false,
2265
+ "rstrip": false,
2266
+ "single_word": false,
2267
+ "special": true
2268
+ },
2269
+ "151926": {
2270
+ "content": "|<EXTRA_TOKENS_257>|",
2271
+ "lstrip": false,
2272
+ "normalized": false,
2273
+ "rstrip": false,
2274
+ "single_word": false,
2275
+ "special": true
2276
+ },
2277
+ "151927": {
2278
+ "content": "|<EXTRA_TOKENS_258>|",
2279
+ "lstrip": false,
2280
+ "normalized": false,
2281
+ "rstrip": false,
2282
+ "single_word": false,
2283
+ "special": true
2284
+ },
2285
+ "151928": {
2286
+ "content": "|<EXTRA_TOKENS_259>|",
2287
+ "lstrip": false,
2288
+ "normalized": false,
2289
+ "rstrip": false,
2290
+ "single_word": false,
2291
+ "special": true
2292
+ },
2293
+ "151929": {
2294
+ "content": "|<EXTRA_TOKENS_260>|",
2295
+ "lstrip": false,
2296
+ "normalized": false,
2297
+ "rstrip": false,
2298
+ "single_word": false,
2299
+ "special": true
2300
+ },
2301
+ "151930": {
2302
+ "content": "|<EXTRA_TOKENS_261>|",
2303
+ "lstrip": false,
2304
+ "normalized": false,
2305
+ "rstrip": false,
2306
+ "single_word": false,
2307
+ "special": true
2308
+ },
2309
+ "151931": {
2310
+ "content": "|<EXTRA_TOKENS_262>|",
2311
+ "lstrip": false,
2312
+ "normalized": false,
2313
+ "rstrip": false,
2314
+ "single_word": false,
2315
+ "special": true
2316
+ },
2317
+ "151932": {
2318
+ "content": "|<EXTRA_TOKENS_263>|",
2319
+ "lstrip": false,
2320
+ "normalized": false,
2321
+ "rstrip": false,
2322
+ "single_word": false,
2323
+ "special": true
2324
+ },
2325
+ "151933": {
2326
+ "content": "|<EXTRA_TOKENS_264>|",
2327
+ "lstrip": false,
2328
+ "normalized": false,
2329
+ "rstrip": false,
2330
+ "single_word": false,
2331
+ "special": true
2332
+ },
2333
+ "151934": {
2334
+ "content": "|<EXTRA_TOKENS_265>|",
2335
+ "lstrip": false,
2336
+ "normalized": false,
2337
+ "rstrip": false,
2338
+ "single_word": false,
2339
+ "special": true
2340
+ },
2341
+ "151935": {
2342
+ "content": "|<EXTRA_TOKENS_266>|",
2343
+ "lstrip": false,
2344
+ "normalized": false,
2345
+ "rstrip": false,
2346
+ "single_word": false,
2347
+ "special": true
2348
+ },
2349
+ "151936": {
2350
+ "content": "<im_start>",
2351
+ "lstrip": false,
2352
+ "normalized": false,
2353
+ "rstrip": false,
2354
+ "single_word": false,
2355
+ "special": true
2356
+ },
2357
+ "151937": {
2358
+ "content": "<im_end>",
2359
+ "lstrip": false,
2360
+ "normalized": false,
2361
+ "rstrip": false,
2362
+ "single_word": false,
2363
+ "special": true
2364
+ },
2365
+ "151938": {
2366
+ "content": "<im_patch>",
2367
+ "lstrip": false,
2368
+ "normalized": false,
2369
+ "rstrip": false,
2370
+ "single_word": false,
2371
+ "special": true
2372
+ },
2373
+ "151939": {
2374
+ "content": "<im_col>",
2375
+ "lstrip": false,
2376
+ "normalized": false,
2377
+ "rstrip": false,
2378
+ "single_word": false,
2379
+ "special": true
2380
+ },
2381
+ "151940": {
2382
+ "content": "<low_res_im_start>",
2383
+ "lstrip": false,
2384
+ "normalized": false,
2385
+ "rstrip": false,
2386
+ "single_word": false,
2387
+ "special": true
2388
+ },
2389
+ "151941": {
2390
+ "content": "<|image|>",
2391
+ "lstrip": false,
2392
+ "normalized": false,
2393
+ "rstrip": false,
2394
+ "single_word": false,
2395
+ "special": true
2396
+ },
2397
+ "151942": {
2398
+ "content": "<im_low>",
2399
+ "lstrip": false,
2400
+ "normalized": false,
2401
+ "rstrip": false,
2402
+ "single_word": false,
2403
+ "special": true
2404
+ },
2405
+ "151943": {
2406
+ "content": "<frame_start>",
2407
+ "lstrip": false,
2408
+ "normalized": false,
2409
+ "rstrip": false,
2410
+ "single_word": false,
2411
+ "special": true
2412
+ },
2413
+ "151944": {
2414
+ "content": "<frame_end>",
2415
+ "lstrip": false,
2416
+ "normalized": false,
2417
+ "rstrip": false,
2418
+ "single_word": false,
2419
+ "special": true
2420
+ },
2421
+ "151945": {
2422
+ "content": "<|video|>",
2423
+ "lstrip": false,
2424
+ "normalized": false,
2425
+ "rstrip": false,
2426
+ "single_word": false,
2427
+ "special": true
2428
+ }
2429
+ },
2430
+ "additional_special_tokens": [
2431
+ "|<EXTRA_TOKENS_0>|",
2432
+ "|<EXTRA_TOKENS_1>|",
2433
+ "|<EXTRA_TOKENS_2>|",
2434
+ "|<EXTRA_TOKENS_3>|",
2435
+ "|<EXTRA_TOKENS_4>|",
2436
+ "|<EXTRA_TOKENS_5>|",
2437
+ "|<EXTRA_TOKENS_6>|",
2438
+ "|<EXTRA_TOKENS_7>|",
2439
+ "|<EXTRA_TOKENS_8>|",
2440
+ "|<EXTRA_TOKENS_9>|",
2441
+ "|<EXTRA_TOKENS_10>|",
2442
+ "|<EXTRA_TOKENS_11>|",
2443
+ "|<EXTRA_TOKENS_12>|",
2444
+ "|<EXTRA_TOKENS_13>|",
2445
+ "|<EXTRA_TOKENS_14>|",
2446
+ "|<EXTRA_TOKENS_15>|",
2447
+ "|<EXTRA_TOKENS_16>|",
2448
+ "|<EXTRA_TOKENS_17>|",
2449
+ "|<EXTRA_TOKENS_18>|",
2450
+ "|<EXTRA_TOKENS_19>|",
2451
+ "|<EXTRA_TOKENS_20>|",
2452
+ "|<EXTRA_TOKENS_21>|",
2453
+ "|<EXTRA_TOKENS_22>|",
2454
+ "|<EXTRA_TOKENS_23>|",
2455
+ "|<EXTRA_TOKENS_24>|",
2456
+ "|<EXTRA_TOKENS_25>|",
2457
+ "|<EXTRA_TOKENS_26>|",
2458
+ "|<EXTRA_TOKENS_27>|",
2459
+ "|<EXTRA_TOKENS_28>|",
2460
+ "|<EXTRA_TOKENS_29>|",
2461
+ "|<EXTRA_TOKENS_30>|",
2462
+ "|<EXTRA_TOKENS_31>|",
2463
+ "|<EXTRA_TOKENS_32>|",
2464
+ "|<EXTRA_TOKENS_33>|",
2465
+ "|<EXTRA_TOKENS_34>|",
2466
+ "|<EXTRA_TOKENS_35>|",
2467
+ "|<EXTRA_TOKENS_36>|",
2468
+ "|<EXTRA_TOKENS_37>|",
2469
+ "|<EXTRA_TOKENS_38>|",
2470
+ "|<EXTRA_TOKENS_39>|",
2471
+ "|<EXTRA_TOKENS_40>|",
2472
+ "|<EXTRA_TOKENS_41>|",
2473
+ "|<EXTRA_TOKENS_42>|",
2474
+ "|<EXTRA_TOKENS_43>|",
2475
+ "|<EXTRA_TOKENS_44>|",
2476
+ "|<EXTRA_TOKENS_45>|",
2477
+ "|<EXTRA_TOKENS_46>|",
2478
+ "|<EXTRA_TOKENS_47>|",
2479
+ "|<EXTRA_TOKENS_48>|",
2480
+ "|<EXTRA_TOKENS_49>|",
2481
+ "|<EXTRA_TOKENS_50>|",
2482
+ "|<EXTRA_TOKENS_51>|",
2483
+ "|<EXTRA_TOKENS_52>|",
2484
+ "|<EXTRA_TOKENS_53>|",
2485
+ "|<EXTRA_TOKENS_54>|",
2486
+ "|<EXTRA_TOKENS_55>|",
2487
+ "|<EXTRA_TOKENS_56>|",
2488
+ "|<EXTRA_TOKENS_57>|",
2489
+ "|<EXTRA_TOKENS_58>|",
2490
+ "|<EXTRA_TOKENS_59>|",
2491
+ "|<EXTRA_TOKENS_60>|",
2492
+ "|<EXTRA_TOKENS_61>|",
2493
+ "|<EXTRA_TOKENS_62>|",
2494
+ "|<EXTRA_TOKENS_63>|",
2495
+ "|<EXTRA_TOKENS_64>|",
2496
+ "|<EXTRA_TOKENS_65>|",
2497
+ "|<EXTRA_TOKENS_66>|",
2498
+ "|<EXTRA_TOKENS_67>|",
2499
+ "|<EXTRA_TOKENS_68>|",
2500
+ "|<EXTRA_TOKENS_69>|",
2501
+ "|<EXTRA_TOKENS_70>|",
2502
+ "|<EXTRA_TOKENS_71>|",
2503
+ "|<EXTRA_TOKENS_72>|",
2504
+ "|<EXTRA_TOKENS_73>|",
2505
+ "|<EXTRA_TOKENS_74>|",
2506
+ "|<EXTRA_TOKENS_75>|",
2507
+ "|<EXTRA_TOKENS_76>|",
2508
+ "|<EXTRA_TOKENS_77>|",
2509
+ "|<EXTRA_TOKENS_78>|",
2510
+ "|<EXTRA_TOKENS_79>|",
2511
+ "|<EXTRA_TOKENS_80>|",
2512
+ "|<EXTRA_TOKENS_81>|",
2513
+ "|<EXTRA_TOKENS_82>|",
2514
+ "|<EXTRA_TOKENS_83>|",
2515
+ "|<EXTRA_TOKENS_84>|",
2516
+ "|<EXTRA_TOKENS_85>|",
2517
+ "|<EXTRA_TOKENS_86>|",
2518
+ "|<EXTRA_TOKENS_87>|",
2519
+ "|<EXTRA_TOKENS_88>|",
2520
+ "|<EXTRA_TOKENS_89>|",
2521
+ "|<EXTRA_TOKENS_90>|",
2522
+ "|<EXTRA_TOKENS_91>|",
2523
+ "|<EXTRA_TOKENS_92>|",
2524
+ "|<EXTRA_TOKENS_93>|",
2525
+ "|<EXTRA_TOKENS_94>|",
2526
+ "|<EXTRA_TOKENS_95>|",
2527
+ "|<EXTRA_TOKENS_96>|",
2528
+ "|<EXTRA_TOKENS_97>|",
2529
+ "|<EXTRA_TOKENS_98>|",
2530
+ "|<EXTRA_TOKENS_99>|",
2531
+ "|<EXTRA_TOKENS_100>|",
2532
+ "|<EXTRA_TOKENS_101>|",
2533
+ "|<EXTRA_TOKENS_102>|",
2534
+ "|<EXTRA_TOKENS_103>|",
2535
+ "|<EXTRA_TOKENS_104>|",
2536
+ "|<EXTRA_TOKENS_105>|",
2537
+ "|<EXTRA_TOKENS_106>|",
2538
+ "|<EXTRA_TOKENS_107>|",
2539
+ "|<EXTRA_TOKENS_108>|",
2540
+ "|<EXTRA_TOKENS_109>|",
2541
+ "|<EXTRA_TOKENS_110>|",
2542
+ "|<EXTRA_TOKENS_111>|",
2543
+ "|<EXTRA_TOKENS_112>|",
2544
+ "|<EXTRA_TOKENS_113>|",
2545
+ "|<EXTRA_TOKENS_114>|",
2546
+ "|<EXTRA_TOKENS_115>|",
2547
+ "|<EXTRA_TOKENS_116>|",
2548
+ "|<EXTRA_TOKENS_117>|",
2549
+ "|<EXTRA_TOKENS_118>|",
2550
+ "|<EXTRA_TOKENS_119>|",
2551
+ "|<EXTRA_TOKENS_120>|",
2552
+ "|<EXTRA_TOKENS_121>|",
2553
+ "|<EXTRA_TOKENS_122>|",
2554
+ "|<EXTRA_TOKENS_123>|",
2555
+ "|<EXTRA_TOKENS_124>|",
2556
+ "|<EXTRA_TOKENS_125>|",
2557
+ "|<EXTRA_TOKENS_126>|",
2558
+ "|<EXTRA_TOKENS_127>|",
2559
+ "|<EXTRA_TOKENS_128>|",
2560
+ "|<EXTRA_TOKENS_129>|",
2561
+ "|<EXTRA_TOKENS_130>|",
2562
+ "|<EXTRA_TOKENS_131>|",
2563
+ "|<EXTRA_TOKENS_132>|",
2564
+ "|<EXTRA_TOKENS_133>|",
2565
+ "|<EXTRA_TOKENS_134>|",
2566
+ "|<EXTRA_TOKENS_135>|",
2567
+ "|<EXTRA_TOKENS_136>|",
2568
+ "|<EXTRA_TOKENS_137>|",
2569
+ "|<EXTRA_TOKENS_138>|",
2570
+ "|<EXTRA_TOKENS_139>|",
2571
+ "|<EXTRA_TOKENS_140>|",
2572
+ "|<EXTRA_TOKENS_141>|",
2573
+ "|<EXTRA_TOKENS_142>|",
2574
+ "|<EXTRA_TOKENS_143>|",
2575
+ "|<EXTRA_TOKENS_144>|",
2576
+ "|<EXTRA_TOKENS_145>|",
2577
+ "|<EXTRA_TOKENS_146>|",
2578
+ "|<EXTRA_TOKENS_147>|",
2579
+ "|<EXTRA_TOKENS_148>|",
2580
+ "|<EXTRA_TOKENS_149>|",
2581
+ "|<EXTRA_TOKENS_150>|",
2582
+ "|<EXTRA_TOKENS_151>|",
2583
+ "|<EXTRA_TOKENS_152>|",
2584
+ "|<EXTRA_TOKENS_153>|",
2585
+ "|<EXTRA_TOKENS_154>|",
2586
+ "|<EXTRA_TOKENS_155>|",
2587
+ "|<EXTRA_TOKENS_156>|",
2588
+ "|<EXTRA_TOKENS_157>|",
2589
+ "|<EXTRA_TOKENS_158>|",
2590
+ "|<EXTRA_TOKENS_159>|",
2591
+ "|<EXTRA_TOKENS_160>|",
2592
+ "|<EXTRA_TOKENS_161>|",
2593
+ "|<EXTRA_TOKENS_162>|",
2594
+ "|<EXTRA_TOKENS_163>|",
2595
+ "|<EXTRA_TOKENS_164>|",
2596
+ "|<EXTRA_TOKENS_165>|",
2597
+ "|<EXTRA_TOKENS_166>|",
2598
+ "|<EXTRA_TOKENS_167>|",
2599
+ "|<EXTRA_TOKENS_168>|",
2600
+ "|<EXTRA_TOKENS_169>|",
2601
+ "|<EXTRA_TOKENS_170>|",
2602
+ "|<EXTRA_TOKENS_171>|",
2603
+ "|<EXTRA_TOKENS_172>|",
2604
+ "|<EXTRA_TOKENS_173>|",
2605
+ "|<EXTRA_TOKENS_174>|",
2606
+ "|<EXTRA_TOKENS_175>|",
2607
+ "|<EXTRA_TOKENS_176>|",
2608
+ "|<EXTRA_TOKENS_177>|",
2609
+ "|<EXTRA_TOKENS_178>|",
2610
+ "|<EXTRA_TOKENS_179>|",
2611
+ "|<EXTRA_TOKENS_180>|",
2612
+ "|<EXTRA_TOKENS_181>|",
2613
+ "|<EXTRA_TOKENS_182>|",
2614
+ "|<EXTRA_TOKENS_183>|",
2615
+ "|<EXTRA_TOKENS_184>|",
2616
+ "|<EXTRA_TOKENS_185>|",
2617
+ "|<EXTRA_TOKENS_186>|",
2618
+ "|<EXTRA_TOKENS_187>|",
2619
+ "|<EXTRA_TOKENS_188>|",
2620
+ "|<EXTRA_TOKENS_189>|",
2621
+ "|<EXTRA_TOKENS_190>|",
2622
+ "|<EXTRA_TOKENS_191>|",
2623
+ "|<EXTRA_TOKENS_192>|",
2624
+ "|<EXTRA_TOKENS_193>|",
2625
+ "|<EXTRA_TOKENS_194>|",
2626
+ "|<EXTRA_TOKENS_195>|",
2627
+ "|<EXTRA_TOKENS_196>|",
2628
+ "|<EXTRA_TOKENS_197>|",
2629
+ "|<EXTRA_TOKENS_198>|",
2630
+ "|<EXTRA_TOKENS_199>|",
2631
+ "|<EXTRA_TOKENS_200>|",
2632
+ "|<EXTRA_TOKENS_201>|",
2633
+ "|<EXTRA_TOKENS_202>|",
2634
+ "|<EXTRA_TOKENS_203>|",
2635
+ "|<EXTRA_TOKENS_204>|",
2636
+ "|<EXTRA_TOKENS_205>|",
2637
+ "|<EXTRA_TOKENS_206>|",
2638
+ "|<EXTRA_TOKENS_207>|",
2639
+ "|<EXTRA_TOKENS_208>|",
2640
+ "|<EXTRA_TOKENS_209>|",
2641
+ "|<EXTRA_TOKENS_210>|",
2642
+ "|<EXTRA_TOKENS_211>|",
2643
+ "|<EXTRA_TOKENS_212>|",
2644
+ "|<EXTRA_TOKENS_213>|",
2645
+ "|<EXTRA_TOKENS_214>|",
2646
+ "|<EXTRA_TOKENS_215>|",
2647
+ "|<EXTRA_TOKENS_216>|",
2648
+ "|<EXTRA_TOKENS_217>|",
2649
+ "|<EXTRA_TOKENS_218>|",
2650
+ "|<EXTRA_TOKENS_219>|",
2651
+ "|<EXTRA_TOKENS_220>|",
2652
+ "|<EXTRA_TOKENS_221>|",
2653
+ "|<EXTRA_TOKENS_222>|",
2654
+ "|<EXTRA_TOKENS_223>|",
2655
+ "|<EXTRA_TOKENS_224>|",
2656
+ "|<EXTRA_TOKENS_225>|",
2657
+ "|<EXTRA_TOKENS_226>|",
2658
+ "|<EXTRA_TOKENS_227>|",
2659
+ "|<EXTRA_TOKENS_228>|",
2660
+ "|<EXTRA_TOKENS_229>|",
2661
+ "|<EXTRA_TOKENS_230>|",
2662
+ "|<EXTRA_TOKENS_231>|",
2663
+ "|<EXTRA_TOKENS_232>|",
2664
+ "|<EXTRA_TOKENS_233>|",
2665
+ "|<EXTRA_TOKENS_234>|",
2666
+ "|<EXTRA_TOKENS_235>|",
2667
+ "|<EXTRA_TOKENS_236>|",
2668
+ "|<EXTRA_TOKENS_237>|",
2669
+ "|<EXTRA_TOKENS_238>|",
2670
+ "|<EXTRA_TOKENS_239>|",
2671
+ "|<EXTRA_TOKENS_240>|",
2672
+ "|<EXTRA_TOKENS_241>|",
2673
+ "|<EXTRA_TOKENS_242>|",
2674
+ "|<EXTRA_TOKENS_243>|",
2675
+ "|<EXTRA_TOKENS_244>|",
2676
+ "|<EXTRA_TOKENS_245>|",
2677
+ "|<EXTRA_TOKENS_246>|",
2678
+ "|<EXTRA_TOKENS_247>|",
2679
+ "|<EXTRA_TOKENS_248>|",
2680
+ "|<EXTRA_TOKENS_249>|",
2681
+ "|<EXTRA_TOKENS_250>|",
2682
+ "|<EXTRA_TOKENS_251>|",
2683
+ "|<EXTRA_TOKENS_252>|",
2684
+ "|<EXTRA_TOKENS_253>|",
2685
+ "|<EXTRA_TOKENS_254>|",
2686
+ "|<EXTRA_TOKENS_255>|",
2687
+ "|<EXTRA_TOKENS_256>|",
2688
+ "|<EXTRA_TOKENS_257>|",
2689
+ "|<EXTRA_TOKENS_258>|",
2690
+ "|<EXTRA_TOKENS_259>|",
2691
+ "|<EXTRA_TOKENS_260>|",
2692
+ "|<EXTRA_TOKENS_261>|",
2693
+ "|<EXTRA_TOKENS_262>|",
2694
+ "|<EXTRA_TOKENS_263>|",
2695
+ "|<EXTRA_TOKENS_264>|",
2696
+ "|<EXTRA_TOKENS_265>|",
2697
+ "|<EXTRA_TOKENS_266>|",
2698
+ "<im_start>",
2699
+ "<im_end>",
2700
+ "<im_patch>",
2701
+ "<im_col>",
2702
+ "<low_res_im_start>",
2703
+ "<|image|>",
2704
+ "<im_low>",
2705
+ "<frame_start>",
2706
+ "<frame_end>",
2707
+ "<|video|>"
2708
+ ],
2709
+ "auto_map": {
2710
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
2711
+ },
2712
+ "bos_token": "<|im_end|>",
2713
+ "clean_up_tokenization_spaces": false,
2714
+ "eos_token": "<|im_end|>",
2715
+ "errors": "replace",
2716
+ "extra_special_tokens": {},
2717
+ "model_max_length": 131072,
2718
+ "pad_token": "<|endoftext|>",
2719
+ "processor_class": "Molmo2Processor",
2720
+ "split_special_tokens": false,
2721
+ "tokenizer_class": "Qwen2Tokenizer",
2722
+ "unk_token": null
2723
+ }
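
Note: the tokenizer configuration above maps AutoProcessor to processing_molmo2.Molmo2Processor via auto_map, so this upload is meant to be loaded with trust_remote_code=True. A minimal loading sketch (the checkpoint path is a placeholder, and exposing the tokenizer as `.tokenizer` is assumed, as Hugging Face processors usually do):

from transformers import AutoProcessor

# Placeholder path: point this at the uploaded repository or a local clone of it.
processor = AutoProcessor.from_pretrained(
    "path/to/Molmo2-8B",
    trust_remote_code=True,  # auto_map resolves AutoProcessor to processing_molmo2.Molmo2Processor
)

# The underlying tokenizer is a Qwen2Tokenizer extended with the special tokens registered above.
print(processor.tokenizer.convert_tokens_to_ids("<im_patch>"))  # 151938, per the added_tokens table
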
video_preprocessor_config.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_molmo2.Molmo2Processor",
4
+ "AutoVideoProcessor": "video_processing_molmo2.Molmo2VideoProcessor"
5
+ },
6
+ "crop_size": null,
7
+ "data_format": "channels_first",
8
+ "default_to_square": true,
9
+ "device": null,
10
+ "do_center_crop": null,
11
+ "do_convert_rgb": true,
12
+ "do_normalize": true,
13
+ "do_rescale": true,
14
+ "do_resize": true,
15
+ "do_sample_frames": true,
16
+ "fps": null,
17
+ "frame_sample_mode": "uniform_last_frame",
18
+ "image_mean": [
19
+ 0.5,
20
+ 0.5,
21
+ 0.5
22
+ ],
23
+ "image_std": [
24
+ 0.5,
25
+ 0.5,
26
+ 0.5
27
+ ],
28
+ "input_data_format": null,
29
+ "max_fps": 2.0,
30
+ "num_frames": 384,
31
+ "pad_size": null,
32
+ "patch_size": 14,
33
+ "pooling_size": [
34
+ 3,
35
+ 3
36
+ ],
37
+ "processor_class": "Molmo2Processor",
38
+ "resample": 2,
39
+ "rescale_factor": 0.00392156862745098,
40
+ "return_metadata": false,
41
+ "sampling_fps": 2,
42
+ "size": {
43
+ "height": 378,
44
+ "width": 378
45
+ },
46
+ "video_metadata": null,
47
+ "video_processor_type": "Molmo2VideoProcessor"
48
+ }
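
For orientation, the geometry implied by this config: frames are resized to 378x378, cut into 14x14 patches (378 / 14 = 27 patches per side), and the [3, 3] pooling of the adapter reduces that to a 9x9 grid, i.e. 81 pooled tokens per frame. A quick sanity check of that arithmetic (an illustration, not part of the uploaded files):

# Geometry implied by size=378, patch_size=14, pooling_size=[3, 3]
size, patch, pool = 378, 14, 3
patches_per_side = size // patch                         # 27
pooled_per_side = (patches_per_side + pool - 1) // pool  # ceil(27 / 3) = 9
print(patches_per_side ** 2, pooled_per_side ** 2)       # 729 ViT patches, 81 pooled tokens per frame
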
video_processing_molmo2.py ADDED
@@ -0,0 +1,967 @@
1
+ """Video processor class for Molmo2"""
2
+ from functools import partial
3
+ import os
4
+ import warnings
5
+ from contextlib import redirect_stdout
6
+ from io import BytesIO
7
+ from urllib.parse import urlparse
8
+ from typing import Optional, Union, Callable
9
+
10
+ import numpy as np
11
+ import requests
12
+ import einops
13
+ import torch
14
+ import torchvision.transforms
15
+
16
+ from transformers.image_utils import (
17
+ IMAGENET_STANDARD_MEAN,
18
+ IMAGENET_STANDARD_STD,
19
+ ImageInput,
20
+ PILImageResampling,
21
+ SizeDict,
22
+ validate_kwargs,
23
+ )
24
+ from transformers.video_utils import (
25
+ VideoInput,
26
+ is_valid_video,
27
+ make_batched_videos,
28
+ make_batched_metadata,
29
+ VideoMetadata,
30
+ )
31
+ from transformers.processing_utils import Unpack, VideosKwargs
32
+ from transformers.video_processing_utils import BaseVideoProcessor
33
+ from transformers.utils import logging
34
+ from transformers.feature_extraction_utils import BatchFeature
35
+ from transformers.utils import (
36
+ is_av_available,
37
+ is_decord_available,
38
+ is_torchcodec_available,
39
+ is_yt_dlp_available,
40
+ TensorType,
41
+ logging,
42
+ to_numpy,
43
+ )
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ MAX_VIDEO_FPS = 8
49
+
50
+
51
+ def normalize_image(
52
+ image: np.ndarray,
53
+ image_mean: list[float],
54
+ image_std: list[float],
55
+ ) -> np.ndarray:
56
+ image -= np.array(image_mean, dtype=np.float32)[None, None, :]
57
+ image /= np.array(image_std, dtype=np.float32)[None, None, :]
58
+ return image
59
+
60
+
61
+ def resize_image(
62
+ image: np.ndarray,
63
+ desired_output_size: list[int],
64
+ resample: PILImageResampling,
65
+ ) -> np.ndarray:
66
+ if len(image.shape) == 3:
67
+ is_video = False
68
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
69
+ else:
70
+ is_video = True
71
+ image = torch.permute(torch.from_numpy(image), [0, 3, 1, 2])
72
+ dtype = image.dtype
73
+ if torch.is_floating_point(image):
74
+ in_min = 0.0
75
+ in_max = 1.0
76
+ resized = torchvision.transforms.Resize(
77
+ desired_output_size,
78
+ resample,
79
+ antialias=False,
80
+ )(image)
81
+ resized = torch.clip(resized, 0.0, 1.0).to(dtype)
82
+ else:
83
+ assert image.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(image.dtype)
84
+ in_min = 0.0
85
+ in_max = 255.0
86
+ resized = torchvision.transforms.Resize(
87
+ desired_output_size,
88
+ resample,
89
+ antialias=False,
90
+ )(image)
91
+ resized = torch.clip(resized, 0, 255).to(dtype)
92
+
93
+ resized = resized.to(torch.float32)
94
+ resized = (resized - in_min) / (in_max - in_min)
95
+
96
+ if is_video:
97
+ resized = torch.permute(resized, [0, 2, 3, 1]).numpy()
98
+ else:
99
+ resized = torch.permute(resized, [1, 2, 0]).numpy()
100
+
101
+ return resized
102
+
103
+
104
+ def build_resized_image(
105
+ image: np.ndarray,
106
+ base_image_input_size: list[int],
107
+ resample: PILImageResampling,
108
+ image_mean: list[float],
109
+ image_std: list[float],
110
+ image_patch_size: int,
111
+ ) -> tuple[np.ndarray, np.ndarray]:
112
+ resized = resize_image(
113
+ image, base_image_input_size, resample,
114
+ )
115
+ resized = normalize_image(resized, image_mean, image_std)
116
+ if len(resized.shape) == 3:
117
+ resized = np.expand_dims(resized, 0)
118
+ crop_patch_w = base_image_input_size[1] // image_patch_size
119
+ crop_patch_h = base_image_input_size[0] // image_patch_size
120
+ resize_idx = np.arange(crop_patch_w*crop_patch_h).reshape([crop_patch_h, crop_patch_w])
121
+ return resized, resize_idx
122
+
123
+
124
+ def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
125
+ """Reshape images of [n_images, h, w, 3] -> [n_images, n_patches, pixels_per_patch]"""
126
+ if len(array.shape) == 3:
127
+ n_crops, h, w = array.shape
128
+ h_patches = h//patch_size
129
+ w_patches = w//patch_size
130
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size])
131
+ array = np.transpose(array, [0, 1, 3, 2, 4])
132
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size])
133
+ return array
134
+ else:
135
+ n_crops, h, w, c = array.shape
136
+ h_patches = h//patch_size
137
+ w_patches = w//patch_size
138
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size, c])
139
+ array = np.transpose(array, [0, 1, 3, 2, 4, 5])
140
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size*c])
141
+ return array
142
+
143
+
144
+ def arange_for_pooling(
145
+ idx_arr: np.ndarray,
146
+ pool_h: int,
147
+ pool_w: int,
148
+ ) -> np.ndarray:
149
+ h_pad = pool_h * ((idx_arr.shape[0] + pool_h - 1) // pool_h) - idx_arr.shape[0]
150
+ w_pad = pool_w * ((idx_arr.shape[1] + pool_w - 1) // pool_w) - idx_arr.shape[1]
151
+ idx_arr = np.pad(idx_arr, [[h_pad//2, (h_pad+1)//2], [w_pad//2, (w_pad+1)//2]],
152
+ mode='constant',constant_values=-1)
153
+ return einops.rearrange(
154
+ idx_arr, "(h dh) (w dw) -> h w (dh dw)", dh=pool_h, dw=pool_w)
155
+
156
+
157
+ def image_to_patches_and_grids(
158
+ image: ImageInput,
159
+ base_image_input_size: list[int],
160
+ resample: PILImageResampling,
161
+ image_mean: list[float],
162
+ image_std: list[float],
163
+ image_patch_size: int,
164
+ image_pooling_w: int,
165
+ image_pooling_h: int,
166
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
167
+ """
168
+ :return image_grids, the shape of each image after pooling
169
+ :return crops, the image crops to process with the ViT
170
+ :return pooled_patch_idx, for each patch_id token in `image_tokens`, the indices of the
171
+ patches in `crops` to pool for that token, masked with -1
172
+ """
173
+ if isinstance(base_image_input_size, int):
174
+ base_image_input_size = (base_image_input_size, base_image_input_size)
175
+
176
+ pooling_w = image_pooling_w
177
+ pooling_h = image_pooling_h
178
+
179
+ resized, resize_idx = build_resized_image(
180
+ image,
181
+ base_image_input_size,
182
+ resample,
183
+ image_mean,
184
+ image_std,
185
+ image_patch_size,
186
+ )
187
+ pooling_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
188
+ h, w = pooling_idx.shape[:2]
189
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
190
+ image_grid = [h, w]
191
+ return (
192
+ image_grid,
193
+ batch_pixels_to_patches(resized, image_patch_size),
194
+ pooling_idx,
195
+ )
196
+
197
+
198
+ def get_candidate_target_fps(
199
+ video_fps: Union[int, float],
200
+ sampling_fps: Union[int, float],
201
+ max_fps: Union[int, float] = MAX_VIDEO_FPS,
202
+ ) -> list[float]:
203
+ """
204
+ Return the subset of `video_fps` factors that remain multiples of `sampling_fps`.
205
+
206
+ Examples:
207
+ >>> get_candidate_target_fps(video_fps=6, sampling_fps=2)
208
+ [2, 6]
209
+ >>> get_candidate_target_fps(video_fps=5, sampling_fps=1)
210
+ [1, 5]
211
+ >>> get_candidate_target_fps(video_fps=2, sampling_fps=2)
212
+ [2]
213
+ >>> get_candidate_target_fps(video_fps=5, sampling_fps=2)
214
+ Traceback (most recent call last):
215
+ ...
216
+ ValueError: sampling_fps=2 must divide video_fps=5.
217
+ """
218
+ if sampling_fps is None:
219
+ raise ValueError("sampling_fps must be provided")
220
+
221
+ video_fps = int(video_fps)
222
+ sampling_fps = int(sampling_fps)
223
+ max_fps = int(max_fps)
224
+ if video_fps <= 0 or sampling_fps <= 0:
225
+ raise ValueError(f"video_fps and sampling_fps must be positive (got {video_fps}, {sampling_fps})")
226
+ if video_fps % sampling_fps != 0:
227
+ raise ValueError(f"sampling_fps={sampling_fps} must divide video_fps={video_fps}.")
228
+
229
+ candidates = []
230
+ for candidate in range(sampling_fps, video_fps + 1, sampling_fps):
231
+ if candidate > max_fps:
232
+ break
233
+ if video_fps % candidate == 0:
234
+ candidates.append(float(candidate))
235
+
236
+ return candidates
237
+
238
+
239
+ def read_video_decord(
240
+ video_path,
241
+ sample_timestamps_fn: Callable,
242
+ **kwargs,
243
+ ) -> np.ndarray:
244
+ """
245
+ Decode a video using the Decord backend.
246
+
247
+ Args:
248
+ video_path (`str`):
249
+ Path to the video file.
250
+ sample_timestamps_fn (`Callable`):
251
+ A callable function that will return timestamps at which the video should be sampled.
252
+
253
+ Returns:
254
+ tuple[`np.array`, `VideoMetadata`]: A tuple containing:
255
+ - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
256
+ - `VideoMetadata` object.
257
+ """
258
+ # Lazy import from decord
259
+ import importlib
260
+ decord = importlib.import_module("decord")
261
+
262
+ vr = decord.VideoReader(uri=video_path, ctx=decord.cpu(0)) # decord has problems with gpu
263
+ video_fps = vr.get_avg_fps()
264
+ total_num_frames = len(vr)
265
+ time_stamps = vr.get_frame_timestamp(list(range(len(vr))))
266
+ duration = time_stamps[-1][1] - time_stamps[0][0]
267
+
268
+ metadata = VideoMetadata(
269
+ total_num_frames=int(total_num_frames),
270
+ fps=float(video_fps),
271
+ duration=float(duration),
272
+ video_backend="decord",
273
+ )
274
+
275
+ target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
276
+ target_timestamps = np.array(target_timestamps)
277
+ offset = time_stamps[0, 0]
278
+
279
+ ix = np.searchsorted(time_stamps[:, 1], target_timestamps + offset, side='right')
280
+ ix = np.minimum(ix, len(time_stamps) - 1)
281
+
282
+ video = vr.get_batch(ix).asnumpy()
283
+ metadata.update(
284
+ {
285
+ "frames_indices": target_timestamps * video_fps,
286
+ "height": video.shape[1],
287
+ "width": video.shape[2],
288
+ }
289
+ )
290
+ return video, metadata
291
+
292
+
293
+ def read_video_torchcodec(
294
+ video_path,
295
+ sample_timestamps_fn: Callable,
296
+ **kwargs,
297
+ ) -> np.ndarray:
298
+ """
299
+ Decode a video using torchcodec decoder.
300
+
301
+ Args:
302
+ video_path (`str`):
303
+ Path to the video file.
304
+ sample_timestamps_fn (`Callable`):
305
+ A callable function that will return timestamps at which the video should be sampled.
306
+
307
+ Returns:
308
+ tuple[`np.array`, `VideoMetadata`]: A tuple containing:
309
+ - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
310
+ - `VideoMetadata` object.
311
+ """
312
+ # Lazy import torchcodec
313
+ import importlib
314
+ torchcodec = importlib.import_module("torchcodec")
315
+
316
+ decoder = torchcodec.decoders.VideoDecoder(
317
+ video_path,
318
+ # Interestingly, `exact` mode takes less time than `approximate` when we load the whole video
319
+ seek_mode="exact",
320
+ # Let FFmpeg decide on the number of threads for efficiency
321
+ num_ffmpeg_threads=0,
322
+ )
323
+ # If the first frame starts at > 0, we effectively clip the video starting at that time
324
+ # since (most) video players would also skip to that time
325
+ time_offset = decoder.metadata.begin_stream_seconds_from_content
326
+ # Note this duration assumes we started playing at `time_offset`
327
+ duration = decoder.metadata.duration_seconds
328
+
329
+ metadata = VideoMetadata(
330
+ total_num_frames=decoder.metadata.num_frames,
331
+ fps=decoder.metadata.average_fps,
332
+ duration=duration,
333
+ video_backend="torchcodec",
334
+ height=decoder.metadata.height,
335
+ width=decoder.metadata.width,
336
+ )
337
+
338
+ target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
339
+
340
+ # Floating point/rounding issues might cause `target_timestamps` to be very slightly
341
+ # out-of-bounds; to handle this we sanity-check and then clip them
342
+ assert all(x >= 0 for x in target_timestamps)
343
+ assert all(x < duration+1e-6 for x in target_timestamps)
344
+ # 1e-6 padding since torchcodec can throw out-of-bounds errors even if you ask for the
345
+ # exact boundary value; we should still get the first/last frame anyway
346
+ max_timestamp = decoder.metadata.end_stream_seconds_from_content - 1e-6
347
+ min_timestamp = decoder.metadata.begin_stream_seconds_from_content + 1e-6
348
+ # Note we avoid using numpy ops here to reduce floating precision issues
349
+ timestamps = [x + time_offset for x in target_timestamps]
350
+ timestamps = [max(min_timestamp, min(max_timestamp, x)) for x in timestamps]
351
+
352
+ video = decoder.get_frames_played_at(timestamps).data.numpy().transpose(0, 2, 3, 1) # Convert to THWC format
353
+ target_timestamps = np.array(target_timestamps)
354
+ metadata.frames_indices = target_timestamps * metadata.fps
355
+
356
+ return video, metadata
357
+
358
+
359
+ def read_video_pyav(
360
+ video_path,
361
+ sample_timestamps_fn: Callable,
362
+ **kwargs,
363
+ ) -> np.ndarray:
364
+ """
365
+ Decode a video using the PyAV backend.
366
+
367
+ Args:
368
+ video_path (`str`):
369
+ Path to the video file.
370
+ sample_timestamps_fn (`Callable`):
371
+ A callable function that will return timestamps at which the video should be sampled.
372
+
373
+ Returns:
374
+ tuple[`np.array`, `VideoMetadata`]: A tuple containing:
375
+ - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
376
+ - `VideoMetadata` object.
377
+ """
378
+ # Lazy import PyAV
379
+ import importlib
380
+ av = importlib.import_module("av")
381
+
382
+ with av.open(video_path) as container:
383
+ video_stream = container.streams.video[0]
384
+ fps = video_stream.average_rate or video_stream.guessed_rate
385
+ it = container.decode(video=0)
386
+ frames = list(it)
387
+
388
+ stream = container.streams.video[0]
389
+ start = frames[0].pts * stream.time_base
390
+ container_end = stream.duration
391
+ if container_end is not None:
392
+ container_end *= stream.time_base
393
+ if container_end is None or container_end < frames[-1].pts:
394
+ # Some problem with stream duration, so use the frame PTS directly
395
+ # and guess the duration of the last frame
396
+ end = frames[-1].pts * stream.time_base + 1/fps
397
+ else:
398
+ end = container_end
399
+ duration = float(end - start)
400
+
401
+ metadata = VideoMetadata(
402
+ total_num_frames=len(frames),
403
+ fps=float(fps),
404
+ duration=float(duration),
405
+ video_backend="pyav",
406
+ height=video_stream.height,
407
+ width=video_stream.width,
408
+ )
409
+
410
+ target_timestamps = sample_timestamps_fn(metadata=metadata, **kwargs)
411
+ offset = float(start)
412
+
413
+ target_timestamps = np.array(target_timestamps)
414
+ end_time_stamps = np.array([float(frame.pts * stream.time_base) for frame in frames[1:]] + [duration])
415
+ indices = np.searchsorted(end_time_stamps, target_timestamps + offset, side='right')
416
+ indices = np.minimum(indices, len(end_time_stamps) - 1)
417
+
418
+ video = np.stack(
419
+ [frames[i].to_ndarray(format="rgb24", channel_last=True) for i in indices],
420
+ axis=0,
421
+ )
422
+
423
+ metadata.frames_indices = target_timestamps * fps
424
+
425
+ return video, metadata
426
+
427
+
428
+ VIDEO_DECODERS = {
429
+ "decord": read_video_decord,
430
+ "torchcodec": read_video_torchcodec,
431
+ "pyav": read_video_pyav,
432
+ }
433
+
434
+
435
+ def load_video(
436
+ video: VideoInput,
437
+ backend: str = "decord",
438
+ sample_timestamps_fn: Optional[Callable] = None,
439
+ **kwargs,
440
+ ):
441
+ """
442
+ Loads `video` to a numpy array.
443
+
444
+ Args:
445
+ video (`VideoInput`):
446
+ The video to convert to the numpy array format. Can be a link to a video or a local path.
447
+ backend (`str`, *optional*, defaults to `"decord"`):
448
+ The backend to use when loading the video. Can be any of ["decord", "pyav", "torchcodec"]. Defaults to "decord".
449
+ sample_timestamps_fn (`Callable`):
450
+ A callable function that will return timestamps at which the video should be sampled.
451
+ """
452
+
453
+ # Early exit if provided an array or `PIL` frames
454
+ if not isinstance(video, str):
455
+ metadata = [None] * len(video)
456
+ return video, metadata
457
+
458
+ if urlparse(video).netloc in ["www.youtube.com", "youtube.com"]:
459
+ if not is_yt_dlp_available():
460
+ raise ImportError("To load a video from YouTube url you have to install `yt_dlp` first.")
461
+ # Lazy import from yt_dlp
462
+ import importlib
463
+ yt_dlp = importlib.import_module("yt_dlp")
464
+
465
+ buffer = BytesIO()
466
+ with redirect_stdout(buffer), yt_dlp.YoutubeDL() as f:
467
+ f.download([video])
468
+ bytes_obj = buffer.getvalue()
469
+ file_obj = BytesIO(bytes_obj)
470
+ elif video.startswith("http://") or video.startswith("https://"):
471
+ file_obj = BytesIO(requests.get(video).content)
472
+ elif os.path.isfile(video):
473
+ file_obj = video
474
+ else:
475
+ raise TypeError("Incorrect format used for video. Should be an url linking to an video or a local path.")
476
+
477
+ # can also load with decord, but not cv2/torchvision
478
+ # both will fail in case of url links
479
+ video_is_url = video.startswith("http://") or video.startswith("https://")
480
+ if video_is_url and backend == "opencv":
481
+ raise ValueError("If you are trying to load a video from URL, you cannot use 'opencv' as backend")
482
+
483
+ if (
484
+ (not is_decord_available() and backend == "decord")
485
+ or (not is_torchcodec_available() and backend == "torchcodec")
486
+ or (not is_av_available() and backend == "pyav")
487
+ ):
488
+ raise ImportError(
489
+ f"You chose backend={backend} for loading the video but the required library is not found in your environment "
490
+ f"Make sure to install {backend} before loading the video."
491
+ )
492
+
493
+ video_decoder = VIDEO_DECODERS[backend]
494
+ video, metadata = video_decoder(file_obj, sample_timestamps_fn, **kwargs)
495
+ return video, metadata
496
+
497
+
498
+ def get_target_fps(
499
+ video_fps: float,
500
+ max_frames: int,
501
+ total_frames: int,
502
+ frame_sample_mode: str,
503
+ candidate_target_fps: tuple[float],
504
+ ) -> float:
505
+ """
506
+ Get the target fps that best spans the video and has the most frames sampled
507
+ """
508
+ num_frames_sampled = 0
509
+ selected_target_fps = None
510
+ for target_fps in candidate_target_fps:
511
+ step_size = max(int(video_fps / target_fps), 1)
512
+ num_frames_sampled_at_fps = int(total_frames / step_size)
513
+ if num_frames_sampled == 0:
514
+ if "uniform" in frame_sample_mode:
515
+ if num_frames_sampled_at_fps > max_frames:
516
+ break
517
+ selected_target_fps = target_fps
518
+ num_frames_sampled = num_frames_sampled_at_fps
519
+
520
+ else:
521
+ # the candidate sampling fps increases so frame count can't decrease
522
+ assert num_frames_sampled <= num_frames_sampled_at_fps
523
+ if num_frames_sampled_at_fps > max_frames:
524
+ # choose the sampling fps that spans the video
525
+ continue
526
+
527
+ elif num_frames_sampled_at_fps > num_frames_sampled:
528
+ # both are less than max_frames, choose the one with higher density of frames sampled
529
+ selected_target_fps = target_fps
530
+ num_frames_sampled = num_frames_sampled_at_fps
531
+ return selected_target_fps
532
+
533
+
534
+ def get_frame_times_and_chosen_fps(
535
+ selected_target_fps,
536
+ total_frames,
537
+ max_frames,
538
+ video_fps
539
+ ):
540
+ if selected_target_fps is None:
541
+ frame_indices = np.linspace(0, total_frames, max_frames, endpoint=False, dtype=int)
542
+ else:
543
+ step_size = max(int(video_fps / selected_target_fps), 1)
544
+ frame_indices = np.arange(0, total_frames, step_size)
545
+ if len(frame_indices) > max_frames:
546
+ frame_indices = frame_indices[:max_frames]
547
+ return selected_target_fps, frame_indices
548
+
549
+
550
+ class Molmo2VideoProcessorKwargs(VideosKwargs, total=False):
551
+ patch_size: Optional[int]
552
+ pooling_size: Optional[list[int]]
553
+ frame_sample_mode: Optional[str]
554
+ max_fps: Optional[int]
555
+ sampling_fps: Optional[int]
556
+
557
+
558
+ class Molmo2VideoProcessor(BaseVideoProcessor):
559
+ resample = PILImageResampling.BILINEAR
560
+ size = {"height": 378, "width": 378}
561
+ image_mean = IMAGENET_STANDARD_MEAN
562
+ image_std = IMAGENET_STANDARD_STD
563
+ do_resize = True
564
+ do_rescale = True
565
+ do_normalize = True
566
+ do_convert_rgb = True
567
+ patch_size = 14
568
+ pooling_size = [3, 3]
569
+ do_sample_frames = True
570
+ frame_sample_mode = "uniform_last_frame"
571
+ max_fps = 2
572
+ sampling_fps = 2
573
+ valid_kwargs = Molmo2VideoProcessorKwargs
574
+ model_input_names = ["pixel_values_videos", "video_token_pooling", "video_grids"]
575
+
576
+ def __init__(self, **kwargs: Unpack[Molmo2VideoProcessorKwargs]):
577
+ super().__init__(**kwargs)
578
+ if self.size is not None and (
579
+ self.size.get("height", None) is None or self.size.get("width", None) is None
580
+ ):
581
+ raise ValueError("size must contain 'height' and 'width' keys.")
582
+
583
+ def _further_process_kwargs(
584
+ self,
585
+ size: Optional[SizeDict] = None,
586
+ **kwargs,
587
+ ) -> dict:
588
+ """
589
+ Update kwargs that need further processing before being validated
590
+ Can be overridden by subclasses to customize the processing of kwargs.
591
+ """
592
+ if size is not None and ("height" not in size or "width" not in size):
593
+ raise ValueError("size must contain 'height' and 'width' keys.")
594
+
595
+ return super()._further_process_kwargs(size=size, **kwargs)
596
+
597
+ def sample_times(
598
+ self,
599
+ metadata: VideoMetadata,
600
+ frame_sample_mode: str,
601
+ num_frames: int,
602
+ max_fps: Optional[int] = None,
603
+ sampling_fps: Optional[int] = None,
604
+ **kwargs,
605
+ ) -> np.ndarray:
606
+ """
607
+ Time-based sampling, used when the video still needs to be decoded from a path or URL
608
+ Args:
609
+ metadata (`VideoMetadata`):
610
+ Metadata of the video containing information about total duration, fps and total number of frames.
611
+ frame_sample_mode (`str`, *optional*):
612
+ Mode to sample frames. Defaults to `self.frame_sample_mode`.
613
+ num_frames (`int`, *optional*):
614
+ Maximum number of frames to sample. Defaults to `self.num_frames`.
615
+ max_fps (`int`, *optional*):
616
+ Maximum frames per second to sample.
617
+ sampling_fps (`int`, *optional*):
618
+ Sampling frames per second. Defaults to `self.sampling_fps`.
619
+ Used when `frame_sample_mode` is `"fps"`.
620
+ """
621
+ frame_sample_mode = frame_sample_mode or self.frame_sample_mode
622
+ num_frames = num_frames or self.num_frames
623
+ sampling_fps = sampling_fps or self.sampling_fps
624
+
625
+ duration = metadata.duration or metadata.total_num_frames / metadata.fps
626
+ if frame_sample_mode == "fps":
627
+ candidate_target_fps = get_candidate_target_fps(metadata.fps, sampling_fps)
628
+ # Try larger and larger FPSs until we hit one that can't span the video
629
+ target_fps = candidate_target_fps[0]
630
+ for candidate_fps in candidate_target_fps[1:]:
631
+ if num_frames / candidate_fps < duration:
632
+ break
633
+ target_fps = candidate_fps
634
+ times = np.arange(0, num_frames) / target_fps
635
+ times = times[times < duration]
636
+ return times
637
+ elif frame_sample_mode == "uniform_last_frame":
638
+ if max_fps is not None:
639
+ max_duration = (num_frames-1) / max_fps # -1 to include the last frame
640
+ if max_duration < duration:
641
+ times = np.linspace(
642
+ 0, duration, num=num_frames, endpoint=True, dtype=np.float64
643
+ )
644
+ else:
645
+ times = np.arange(0.0, stop=duration, step=1/max_fps)
646
+ times = np.concatenate([times, [duration]], axis=0)
647
+ assert len(times) <= num_frames
648
+ else:
649
+ times = np.linspace(
650
+ 0, duration, num=num_frames, endpoint=True, dtype=np.float64
651
+ )
652
+ return times
653
+ else:
654
+ raise NotImplementedError(frame_sample_mode)
655
+
656
+ def sample_frames(
657
+ self,
658
+ metadata: VideoMetadata,
659
+ frame_sample_mode: Optional[str] = None,
660
+ num_frames: Optional[int] = None,
661
+ max_fps: Optional[int] = None,
662
+ sampling_fps: Optional[int] = None,
663
+ **kwargs,
664
+ ) -> np.ndarray:
665
+ """
666
+ Frame-based sampling if an array video is passed
667
+ Args:
668
+ metadata (`VideoMetadata`):
669
+ Metadata of the video containing information about total duration, fps and total number of frames.
670
+ frame_sample_mode (`str`, *optional*):
671
+ Mode to sample frames. Defaults to `self.frame_sample_mode`.
672
+ num_frames (`int`, *optional*):
673
+ Maximum number of frames to sample. Defaults to `self.num_frames`.
674
+ max_fps (`int`, *optional*):
675
+ Maximum frames per second to sample.
676
+ sampling_fps (`int`, *optional*):
677
+ Sampling frames per second. Defaults to `self.sampling_fps`.
678
+ Used when `frame_sample_mode` is `"fps"`.
679
+ """
680
+ frame_sample_mode = frame_sample_mode or self.frame_sample_mode
681
+ num_frames = num_frames or self.num_frames
682
+ sampling_fps = sampling_fps or self.sampling_fps
683
+
684
+ total_num_frames = metadata.total_num_frames
685
+ if frame_sample_mode == "uniform_last_frame" and max_fps is not None:
686
+ duration = total_num_frames / metadata.fps
687
+ if total_num_frames <= 2:
688
+ return np.arange(total_num_frames).astype(int)
689
+ if duration > (num_frames - 1) / max_fps: # -1 to include the last frame
690
+ # uniform fallback
691
+ indices = np.linspace(
692
+ 0,
693
+ total_num_frames - 1,
694
+ num=min(num_frames, total_num_frames),
695
+ endpoint=True,
696
+ ).astype(int)
697
+ return indices
698
+ else:
699
+ float_indices = np.arange(
700
+ 0.0, stop=total_num_frames - 1, step=float(metadata.fps / max_fps),
701
+ )
702
+ if np.round(float_indices[-1]) != total_num_frames - 1:
703
+ float_indices = np.concatenate([float_indices, [total_num_frames - 1]], axis=0)
704
+ indices = np.round(float_indices).astype(int)
705
+ assert indices[-1] < total_num_frames
706
+ assert len(float_indices) <= num_frames
707
+ return indices
708
+ elif frame_sample_mode == "uniform_last_frame":
709
+ indices = np.linspace(
710
+ 0, total_num_frames - 1, num=min(num_frames, total_num_frames), endpoint=True,
711
+ ).astype(int)
712
+ return indices
713
+ elif frame_sample_mode == "fps":
714
+ candidate_target_fps = get_candidate_target_fps(metadata.fps, sampling_fps)
715
+ selected_target_fps = get_target_fps(
716
+ metadata.fps,
717
+ num_frames,
718
+ total_num_frames,
719
+ frame_sample_mode,
720
+ candidate_target_fps,
721
+ )
722
+ _, indices = get_frame_times_and_chosen_fps(
723
+ selected_target_fps,
724
+ total_num_frames,
725
+ num_frames,
726
+ metadata.fps,
727
+ )
728
+ return indices
729
+ else:
730
+ raise NotImplementedError(frame_sample_mode)
731
+
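# Editorial illustration (not part of the uploaded file): with the defaults above
# (frame_sample_mode="uniform_last_frame", num_frames=384, max_fps=2), a 60 s clip at
# 30 fps (1800 frames) has duration 60 < (384 - 1) / 2, so frames are taken every
# fps / max_fps = 15 frames: indices 0, 15, ..., 1785 plus the final frame 1799 (121 frames).
# A 10-minute clip exceeds (384 - 1) / 2 seconds, so it falls back to 384 uniformly
# spaced indices that still end on the last frame.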
732
+ def fetch_videos(
733
+ self,
734
+ video_url_or_urls: Union[str, list[str], list[list[str]]],
735
+ sample_timestamps_fn=None
736
+ ):
737
+ """
738
+ Convert a single URL or a list of URLs into the corresponding `np.array` objects.
739
+
740
+ If a single URL is passed, the return value will be a single object. If a list is passed, a list of objects is
741
+ returned.
742
+ """
743
+ if (
744
+ (not is_decord_available())
745
+ and (not is_torchcodec_available())
746
+ and (not is_av_available())
747
+ ):
748
+ raise ImportError(
749
+ "Molmo2VideoProcessor requires `decord`, `torchcodec`, or `av` to be installed."
750
+ )
751
+
752
+ if is_decord_available():
753
+ backend = "decord"
754
+ elif is_torchcodec_available():
755
+ warnings.warn(
756
+ "`decord` is not installed and cannot be used to decode the video by default. "
757
+ "Falling back to `torchcodec`."
758
+ )
759
+ backend = "torchcodec"
760
+ else:
761
+ warnings.warn(
762
+ "`decord` is not installed and cannot be used to decode the video by default. "
763
+ "Falling back to `PyAV`."
764
+ )
765
+ backend = "pyav"
766
+
767
+ if isinstance(video_url_or_urls, list):
768
+ return list(zip(*[self.fetch_videos(x, sample_timestamps_fn=sample_timestamps_fn) for x in video_url_or_urls]))
769
+ else:
770
+ return load_video(video_url_or_urls, backend=backend, sample_timestamps_fn=sample_timestamps_fn)
771
+
772
+ def _decode_and_sample_videos(
773
+ self,
774
+ videos: VideoInput,
775
+ video_metadata: Union[VideoMetadata, dict],
776
+ do_sample_frames: Optional[bool] = None,
777
+ sample_indices_fn: Optional[Callable] = None,
778
+ sample_timestamps_fn: Optional[Callable] = None,
779
+ ):
780
+ """
781
+ Decode input videos and sample frames if needed.
782
+ """
783
+ videos = make_batched_videos(videos)
784
+ video_metadata = make_batched_metadata(videos, video_metadata=video_metadata)
785
+
786
+ # Frame-based sampling if an array video is passed
787
+ # Otherwise, time-based sampling with decoding
788
+ if is_valid_video(videos[0]) and do_sample_frames:
789
+ assert video_metadata[0].fps is not None, "FPS must be provided for video input"
790
+ sampled_videos = []
791
+ sampled_metadata = []
792
+ for video, metadata in zip(videos, video_metadata):
793
+ indices = sample_indices_fn(metadata=metadata)
794
+ metadata.frames_indices = indices
795
+ sampled_videos.append(video[indices])
796
+ sampled_metadata.append(metadata)
797
+ videos = sampled_videos
798
+ video_metadata = sampled_metadata
799
+ elif not is_valid_video(videos[0]):
800
+ if sample_indices_fn is None:
801
+ logger.warning(
802
+ "do_sample_frames is False, but video array is not provided: "
803
+ "Will decode the video and sample frames using Molmo2's default sampling mode"
804
+ )
805
+ if isinstance(videos[0], list):
806
+ raise ValueError(
807
+ "A list of images is not supported for video input!"
808
+ )
809
+ else:
810
+ videos, video_metadata = self.fetch_videos(videos, sample_timestamps_fn=sample_timestamps_fn)
811
+
812
+ return videos, video_metadata
813
+
814
+ def _prepare_input_videos(
815
+ self,
816
+ videos: VideoInput,
817
+ **kwargs,
818
+ ) -> list[np.ndarray]:
819
+ processed_videos = [to_numpy(video) for video in videos]
820
+ return processed_videos
821
+
822
+ def preprocess(
823
+ self,
824
+ videos: VideoInput,
825
+ **kwargs: Unpack[Molmo2VideoProcessorKwargs],
826
+ ) -> BatchFeature:
827
+ validate_kwargs(
828
+ captured_kwargs=kwargs.keys(),
829
+ valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"],
830
+ )
831
+
832
+ # Set default kwargs from self. This ensures that if a kwarg is not provided
833
+ # by the user, it gets its default value from the instance, or is set to None.
834
+ for kwarg_name in self.valid_kwargs.__annotations__:
835
+ kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))
836
+
837
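+ # Pop the decode/sample controls; the remaining kwargs configure frame/timestamp sampling
+ # and the patch preprocessing below.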
+ do_sample_frames = kwargs.pop("do_sample_frames")
838
+ video_metadata = kwargs.pop("video_metadata")
839
+
840
+ sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
841
+ sample_timestamps_fn = partial(self.sample_times, **kwargs)
842
+ videos, video_metadata = self._decode_and_sample_videos(
843
+ videos,
844
+ video_metadata=video_metadata,
845
+ do_sample_frames=do_sample_frames,
846
+ sample_indices_fn=sample_indices_fn,
847
+ sample_timestamps_fn=sample_timestamps_fn,
848
+ )
849
+ videos = self._prepare_input_videos(videos=videos)
850
+
851
+ kwargs = self._further_process_kwargs(**kwargs)
852
+
853
+ return_metadata = kwargs.pop("return_metadata")
854
+ preprocessed_videos = self._preprocess(videos=videos, **kwargs)
855
+ if return_metadata:
856
+ preprocessed_videos["video_metadata"] = video_metadata
857
+ return preprocessed_videos
858
+
859
+ def _preprocess(
860
+ self,
861
+ videos: list[np.ndarray],
862
+ size: Optional[SizeDict] = None,
863
+ resample: Optional[PILImageResampling] = None,
864
+ image_mean: Optional[Union[float, list[float]]] = None,
865
+ image_std: Optional[Union[float, list[float]]] = None,
866
+ do_convert_rgb: Optional[bool] = None,
867
+ patch_size: Optional[int] = None,
868
+ pooling_size: Optional[list[int]] = None,
869
+ return_tensors: Optional[Union[str, TensorType]] = None,
870
+ **kwargs,
871
+ ) -> BatchFeature:
872
+ """
873
+ Preprocess a video for the model.
874
+ Args:
875
+ videos (`list[np.ndarray]`):
876
+ Videos to preprocess.
877
+ size (`SizeDict`, *optional*, defaults to `self.size`):
878
+ Size of the image after resizing.
879
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
880
+ Resampling filter to use when resizing the image. This can be one of the enum `PILImageResampling`. Only
881
+ has an effect if `do_resize` is set to `True`.
882
+ image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
883
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
884
+ image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
885
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
886
+ `True`.
887
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
888
+ Whether to convert the image to RGB.
889
+ patch_size (`int`, *optional*, defaults to `self.patch_size`):
890
+ The spatial patch size of the vision encoder.
891
+ pooling_size (`list[int]`, *optional*, defaults to `self.pooling_size`):
892
+ The pooling size of the vision adapter.
893
+ return_tensors (`str` or `TensorType`, *optional*):
894
+ The type of tensors to return. Can be one of:
895
+ - Unset: Return a list of `np.ndarray`.
896
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
897
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
898
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
899
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
900
+
901
+ Returns:
902
+ A `BatchFeature` containing the following keys:
903
+ - `pixel_values_videos`: The preprocessed videos.
904
+ - `video_token_pooling`: The indices of the patches in `pixel_values_videos` to pool for each video token.
905
+ - `video_grids`: The per-video grids, i.e. the number of frames followed by the per-frame crop grid.
906
+ """
907
+ if size.height is None or size.width is None:
908
+ raise ValueError("size must contain 'height' and 'width' keys.")
909
+
910
+ base_image_input_size = [size.height, size.width]
911
+
912
+ resample = resample or self.resample
913
+ image_mean = image_mean or self.image_mean
914
+ image_std = image_std or self.image_std
915
+ do_convert_rgb = do_convert_rgb or self.do_convert_rgb
916
+
917
+ patch_size = patch_size or self.patch_size
918
+ pooling_size = pooling_size or self.pooling_size
919
+
920
+ image_pooling_h, image_pooling_w = pooling_size
921
+
922
+ batch_grids = []
923
+ batch_crops = []
924
+ batch_pooled_patches_idx = []
925
+
926
+ for video in videos:
927
+ all_crops = []
928
+ pooled_patches_idx = []
929
+
930
+ for frame in video:
931
+ image_grid, crops, pooled_idx = image_to_patches_and_grids(
932
+ frame,
933
+ base_image_input_size,
934
+ resample,
935
+ image_mean,
936
+ image_std,
937
+ patch_size,
938
+ image_pooling_w,
939
+ image_pooling_h,
940
+ )
941
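+ # Shift this frame's pooled patch indices by the number of patches already collected for
+ # earlier frames so they index into the concatenated crops; negative entries are left as-is.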
+ offset = sum(np.prod(x.shape[:2]) for x in all_crops)
942
+ pooled_idx_with_offset = np.where(pooled_idx >= 0, pooled_idx + offset, pooled_idx)
943
+ pooled_patches_idx.append(pooled_idx_with_offset)
944
+ all_crops.append(crops)
945
+
946
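+ # Per-video grid: the number of frames followed by the per-frame crop grid.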
+ video_grid = np.array([len(video), image_grid[0], image_grid[1]])
947
+ all_crops = np.concatenate(all_crops, 0)
948
+ pooled_patches_idx = np.concatenate(pooled_patches_idx, 0)
949
+
950
+ batch_grids.append(video_grid)
951
+ batch_crops.append(all_crops)
952
+ batch_pooled_patches_idx.append(pooled_patches_idx)
953
+
954
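+ # Stack per-video grids and concatenate crops and pooled patch indices across the batch.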
+ video_grids = np.stack(batch_grids, 0)
955
+ pixel_values_videos = np.concatenate(batch_crops, 0)
956
+ video_token_pooling = np.concatenate(batch_pooled_patches_idx, 0)
957
+
958
+ data = dict(
959
+ pixel_values_videos=pixel_values_videos,
960
+ video_token_pooling=video_token_pooling,
961
+ video_grids=video_grids,
962
+ )
963
+
964
+ return BatchFeature(data, tensor_type=return_tensors)
965
+
966
+
967
+ Molmo2VideoProcessor.register_for_auto_class()
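For reference, a minimal usage sketch of the processor defined above; the checkpoint id, the video path, and the AutoVideoProcessor entry point are assumptions for illustration and are not part of this commit:

from transformers import AutoVideoProcessor

# Load the video processor uploaded in this commit (the checkpoint id is an assumed placeholder).
processor = AutoVideoProcessor.from_pretrained("allenai/Molmo2-8B", trust_remote_code=True)

# Decode, sample, and patchify a video; the path is a placeholder.
features = processor.preprocess(videos="path/to/video.mp4", return_tensors="pt")
print(features["pixel_values_videos"].shape)  # concatenated crops across frames and videos
print(features["video_grids"])                # number of frames and crop grid per video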
vocab.json ADDED
The diff for this file is too large to render. See raw diff