Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +311 -3
- added_tokens.json +28 -0
- chat_template.json +3 -0
- config.json +105 -0
- configuration_ovis2_6.py +147 -0
- generation_config.json +15 -0
- merges.txt +0 -0
- model-00001-of-00013.safetensors +3 -0
- model-00002-of-00013.safetensors +3 -0
- model-00003-of-00013.safetensors +3 -0
- model-00004-of-00013.safetensors +3 -0
- model-00005-of-00013.safetensors +3 -0
- model-00006-of-00013.safetensors +3 -0
- model-00007-of-00013.safetensors +3 -0
- model-00008-of-00013.safetensors +3 -0
- model-00009-of-00013.safetensors +3 -0
- model-00010-of-00013.safetensors +3 -0
- model-00011-of-00013.safetensors +3 -0
- model-00012-of-00013.safetensors +3 -0
- model-00013-of-00013.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_ovis2_6.py +1458 -0
- preprocessor_config.json +24 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +247 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,311 @@
---
license: apache-2.0
---

# Ovis2.6-30B-A3B

<div align="center">
<img src="https://cdn-uploads.huggingface.co/production/uploads/637aebed7ce76c3b834cea37/3IK823BZ8w-mz_QfeYkDn.png" width="30%"/>
</div>

## Introduction

We introduce **Ovis2.6-30B-A3B**, the latest advancement in the Ovis series of Multimodal Large Language Models (MLLMs). Building on the strong foundation of Ovis2.5, Ovis2.6 upgrades the LLM backbone to a **Mixture-of-Experts (MoE)** architecture, delivering superior multimodal performance at a fraction of the serving cost. It also brings major improvements in long-context and high-resolution understanding, visual reasoning with active image analysis, and information-dense document comprehension.

<div align="center">
<img src="https://cdn-uploads.huggingface.co/production/uploads/658a8a837959448ef5500ce5/IPsQk8gTTMD-ipTye3WED.png" width="100%" />
</div>

## Key Features

- **MoE Architecture: Superior Performance with Low Serving Cost**
  The LLM side has been upgraded to a **Mixture-of-Experts (MoE)** architecture. This allows Ovis2.6 to scale up to **30B total parameters**, capturing vast amounts of knowledge and nuance. Crucially, it achieves this with only **~3B active parameters** during inference, ensuring low serving costs and high throughput.

- **Enhanced Long-Sequence and High-Resolution Processing**
  Ovis2.6 extends the context window to **64K tokens** and supports image resolutions up to **2880×2880**, significantly improving its ability to process high-resolution and information-dense visual inputs. These enhancements are particularly effective for **long-document question answering**, where the model must gather and synthesize clues scattered across multiple pages to arrive at the correct answer.

- **Think with Image**
  We introduce the **"Think with Image"** capability, which transforms vision from a passive input into an active cognitive workspace. During reasoning, the model can actively invoke visual tools (e.g., cropping and rotation) to re-examine and analyze image regions within its chain-of-thought, enabling multi-turn, self-reflective reasoning over visual inputs for higher accuracy on complex tasks.

- **Reinforced OCR, Document, and Chart Capabilities**
  Continuing our focus on information-dense visual tasks, we have further reinforced the model's capabilities in **Optical Character Recognition (OCR)**, **document understanding**, and **chart/diagram analysis**. Ovis2.6 excels not only at accurately extracting structured information from visual data, but also at **reasoning** over the extracted content.

<div align="center">
<img src="https://cdn-uploads.huggingface.co/production/uploads/658a8a837959448ef5500ce5/3_A0CA-oO0Ie_WoigjAwo.png" width="100%" />
</div>

## Quick Inference

Below is a simple example demonstrating how to run Ovis2.6 with a single image input.

First, install the required dependencies:

```bash
pip install torch==2.7.1 transformers==4.57.0 numpy==1.25.0 pillow==10.3.0 moviepy==1.0.3 accelerate==1.12.0
pip install --no-build-isolation --no-cache-dir flash-attn==2.8.3
```

Then, run the following code.

```python
import torch
import requests
from PIL import Image
from transformers import AutoModelForCausalLM

# Thinking mode & budget
enable_thinking = True
enable_thinking_budget = True  # Only effective if enable_thinking is True.

# Total tokens for thinking + answer. Ensure: max_new_tokens > thinking_budget + 25
max_new_tokens = 2048
thinking_budget = 1024

model = AutoModelForCausalLM.from_pretrained(
    "AIDC-AI/Ovis2.6-30B-A3B",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto"
)

messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": Image.open(requests.get("https://cdn-uploads.huggingface.co/production/uploads/658a8a837959448ef5500ce5/TIlymOb86R6_Mez3bpmcB.png", stream=True).raw)},
        {"type": "text", "text": "Calculate the sum of the numbers in the middle box in figure (c)."},
    ],
}]

input_ids, pixel_values, grid_thws = model.preprocess_inputs(
    messages=messages,
    add_generation_prompt=True,
    enable_thinking=enable_thinking
)
input_ids = input_ids.cuda()
pixel_values = pixel_values.cuda() if pixel_values is not None else None
grid_thws = grid_thws.cuda() if grid_thws is not None else None

outputs = model.generate(
    inputs=input_ids,
    pixel_values=pixel_values,
    grid_thws=grid_thws,
    enable_thinking=enable_thinking,
    enable_thinking_budget=enable_thinking_budget,
    max_new_tokens=max_new_tokens,
    thinking_budget=thinking_budget,
)

response = model.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

The thinking and thinking-budget logic can be applied in the same way to multi-image, video, and pure-text scenarios.

**Note (answer extraction for CoT/Thinking):**
To make evaluation and usage easier, we recommend appending a fixed suffix to prompts when using chain-of-thought (CoT) or thinking mode. This ensures the model clearly outputs a final answer that can be extracted programmatically:

```
End your response with 'Final answer: '.
```

For example:

```
Calculate the sum of the numbers in the middle box in figure (c).
End your response with 'Final answer: '.
```
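
With that suffix in place, the final answer can be pulled out with a simple string match. The helper below is our own illustrative sketch, not part of the model repo:

```python
import re

def extract_final_answer(response: str) -> str:
    """Return the text after the last 'Final answer:' marker, or the whole response if absent."""
    matches = re.findall(r"Final answer:\s*(.+)", response)
    return matches[-1].strip() if matches else response.strip()

print(extract_final_answer("...reasoning...\nFinal answer: 21"))  # -> 21
```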


**Tip:** The sections below include an optional streaming helper (compatible with two-phase thinking/budget runs) and extra inference modes: multi-image, video, and text-only.

<details>
<summary>Optional: Streaming (Advanced)</summary>

To support the thinking budget, we modified the implementation of the Ovis `generate` method, so the default `TextIteratorStreamer` is now incompatible. If you need to stream model output, be sure to use the helper class below.

```python
# --- Budget-aware streamer helper ---
from transformers import TextIteratorStreamer

class BudgetAwareTextStreamer(TextIteratorStreamer):
    """A streamer compatible with Ovis two-phase generation.

    Call .manual_end() after generation to flush any remaining text.
    """
    def manual_end(self):
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len:]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    # Disable base class's end hook; we'll finalize via manual_end()
    def end(self):
        pass
```

Example usage:

```python
streamer = BudgetAwareTextStreamer(
    model.text_tokenizer,
    skip_prompt=True,
    skip_special_tokens=True
)

outputs = model.generate(
    inputs=input_ids,
    pixel_values=pixel_values,
    grid_thws=grid_thws,
    enable_thinking=enable_thinking,
    enable_thinking_budget=enable_thinking_budget,
    max_new_tokens=max_new_tokens,
    thinking_budget=thinking_budget,
    streamer=streamer
)
```
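
Because `end()` is a no-op here, the iterator only terminates once `manual_end()` is called. A minimal consumption pattern (our own sketch of the usual run-generate-in-a-thread idiom for iterator streamers, not code from the repo) could look like:

```python
import threading

def run():
    model.generate(
        inputs=input_ids,
        pixel_values=pixel_values,
        grid_thws=grid_thws,
        enable_thinking=enable_thinking,
        enable_thinking_budget=enable_thinking_budget,
        max_new_tokens=max_new_tokens,
        thinking_budget=thinking_budget,
        streamer=streamer,
    )
    streamer.manual_end()  # flush remaining text and signal end-of-stream to the iterator

thread = threading.Thread(target=run)
thread.start()
for chunk in streamer:  # yields decoded text pieces as they arrive
    print(chunk, end="", flush=True)
thread.join()
```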

</details>

<details>
<summary>Example: Multi-image</summary>

Demonstrates how to run inference with multiple images and a related question.

```python
# Multi-image inference
multi_image_files = [
    "/path/to/image_1.jpg",
    "/path/to/image_2.jpg",
    "/path/to/image_3.jpg",
]

content = [{"type": "image", "image": Image.open(p).convert("RGB")} for p in multi_image_files]
content.append({"type": "text", "text": "Describe the images."})
messages = [{"role": "user", "content": content}]

input_ids, pixel_values, grid_thws = model.preprocess_inputs(messages=messages, add_generation_prompt=True, max_pixels=896*896)
input_ids = input_ids.cuda()
pixel_values = pixel_values.cuda().to(model.dtype) if pixel_values is not None else None
grid_thws = grid_thws.cuda() if grid_thws is not None else None

with torch.no_grad():
    outputs = model.generate(inputs=input_ids, pixel_values=pixel_values, grid_thws=grid_thws,
                             max_new_tokens=1024, do_sample=True,
                             eos_token_id=model.text_tokenizer.eos_token_id,
                             pad_token_id=model.text_tokenizer.pad_token_id)
print(model.text_tokenizer.decode(outputs[0], skip_special_tokens=True))
```

</details>

<details>
<summary>Example: Video</summary>

Demonstrates how to run inference on a video by sampling multiple frames and asking the model to describe the content.

```python
# Video inference
from moviepy.editor import VideoFileClip  # pip install moviepy==1.0.3

video_file = "/path/to/video_1.mp4"
num_frames = 8

with VideoFileClip(video_file) as clip:
    total_frames = int(clip.fps * clip.duration)
    indices = [int(i * total_frames / num_frames) for i in range(num_frames)]
    frames = [Image.fromarray(clip.get_frame(t)) for t in (idx / clip.fps for idx in indices)]

messages = [{"role": "user", "content": [
    {"type": "video", "video": frames},
    {"type": "text", "text": "Describe this video in detail."},
]}]

input_ids, pixel_values, grid_thws = model.preprocess_inputs(messages=messages, add_generation_prompt=True, max_pixels=896*896)
input_ids = input_ids.cuda()
pixel_values = pixel_values.cuda().to(model.dtype) if pixel_values is not None else None
grid_thws = grid_thws.cuda() if grid_thws is not None else None

with torch.no_grad():
    outputs = model.generate(inputs=input_ids, pixel_values=pixel_values, grid_thws=grid_thws,
                             max_new_tokens=1024, do_sample=True,
                             eos_token_id=model.text_tokenizer.eos_token_id,
                             pad_token_id=model.text_tokenizer.pad_token_id)
print(model.text_tokenizer.decode(outputs[0], skip_special_tokens=True))
```

</details>

<details>
<summary>Example: Text-only</summary>

Demonstrates how to run inference using only text input, without any images or videos.

```python
# Text-only inference
messages = [{"role": "user", "content": "Hi, please introduce Yellow Mountain."}]

input_ids, _, _ = model.preprocess_inputs(messages=messages, add_generation_prompt=True)
input_ids = input_ids.cuda()

with torch.no_grad():
    outputs = model.generate(inputs=input_ids, max_new_tokens=1024, do_sample=True,
                             eos_token_id=model.text_tokenizer.eos_token_id,
                             pad_token_id=model.text_tokenizer.pad_token_id)
print(model.text_tokenizer.decode(outputs[0], skip_special_tokens=True))
```

</details>


To enable grounding, end your prompt with `Please provide the bounding box coordinates.` (for boxes) or `Please provide the point coordinates.` (for points). To target a specific object, wrap its description in `<ref>` tags, e.g.:

```text
Find the <ref>red apple</ref> in the image. Please provide the bounding box coordinates.
```

Coordinates are normalized to `[0,1)` with the origin `(0,0)` at the top-left corner of the image.

* Point: `<point>(x,y)</point>`
* Bounding box: `<box>(x1,y1),(x2,y2)</box>`, where `(x1,y1)` is the top-left and `(x2,y2)` the bottom-right corner.
* Multiple results can be listed in square brackets: `[<box>(...)</box>,<box>(...)</box>]`

Example:

```text
The image features a serene scene with <ref>three birds</ref>[
<box>(0.401,0.526),(0.430,0.557)</box>,
<box>(0.489,0.494),(0.516,0.526)</box>,
<box>(0.296,0.529),(0.324,0.576)</box>
] flying in formation against a clear blue sky.
```
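
Because the coordinates are normalized, mapping a predicted box back to pixel space is a single scaling step. The parser below is our own illustrative sketch (the `parse_boxes` helper is not part of the repo):

```python
import re

def parse_boxes(text: str, width: int, height: int):
    """Extract <box>(x1,y1),(x2,y2)</box> spans and scale them to pixel coordinates."""
    pattern = r"<box>\(([\d.]+),([\d.]+)\),\(([\d.]+),([\d.]+)\)</box>"
    boxes = []
    for x1, y1, x2, y2 in re.findall(pattern, text):
        boxes.append((float(x1) * width, float(y1) * height,
                      float(x2) * width, float(y2) * height))
    return boxes

sample = "<box>(0.401,0.526),(0.430,0.557)</box>"
print(parse_boxes(sample, width=1920, height=1080))
```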

## Performance

![performance](https://cdn-uploads.huggingface.co/production/uploads/637aebed7ce76c3b834cea37/rYO4eCAJZzZcKsrWXBPUH.png)

## Citation

If you find Ovis useful, please consider citing the paper:

```bibtex
@article{lu2025ovis25technicalreport,
  title={Ovis2.5 Technical Report},
  author={Shiyin Lu and Yang Li and Yu Xia and Yuwei Hu and Shanshan Zhao and Yanqing Ma and Zhichao Wei and Yinglun Li and Lunhao Duan and Jianshan Zhao and Yuxuan Han and Haijun Li and Wanying Chen and Junke Tang and Chengkun Hou and Zhixing Du and Tianli Zhou and Wenjie Zhang and Huping Ding and Jiahe Li and Wen Li and Gui Hu and Yiliang Gu and Siran Yang and Jiamang Wang and Hailong Sun and Yibo Wang and Hui Sun and Jinlong Huang and Yuping He and Shengze Shi and Weihong Zhang and Guodong Zheng and Junpeng Jiang and Sensen Gao and Yi-Feng Wu and Sijia Chen and Yuhui Chen and Qing-Guo Chen and Zhao Xu and Weihua Luo and Kaifu Zhang},
  year={2025},
  journal={arXiv:2508.11737}
}

@article{lu2024ovis,
  title={Ovis: Structural Embedding Alignment for Multimodal Large Language Model},
  author={Shiyin Lu and Yang Li and Qing-Guo Chen and Zhao Xu and Weihua Luo and Kaifu Zhang and Han-Jia Ye},
  year={2024},
  journal={arXiv:2405.20797}
}
```

## License
This project is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0.txt) (SPDX-License-Identifier: Apache-2.0).

## Disclaimer
We used compliance-checking algorithms during the training process to ensure the compliance of the trained model to the best of our ability. Due to the complexity of the data and the diversity of language-model usage scenarios, we cannot guarantee that the model is completely free of copyright issues or improper content. If you believe anything infringes on your rights or generates improper content, please contact us, and we will promptly address the matter.
added_tokens.json
ADDED
@@ -0,0 +1,28 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
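
For reference, these ids can be checked against the tokenizer. A minimal sketch (our own, assuming the tokenizer loads through the standard `AutoTokenizer` path):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AIDC-AI/Ovis2.6-30B-A3B", trust_remote_code=True)
print(tok.convert_tokens_to_ids("<think>"))   # expected: 151667
print(tok.convert_tokens_to_ids("</think>"))  # expected: 151668
```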
chat_template.json
ADDED
@@ -0,0 +1,3 @@
{
  "chat_template": "{%- for message in messages %}{{- '<|im_start|>' + message.role + '\n'}}{%- if message.role == 'system' or message.role == 'user' %}{%- if message.content is string %}{{- message.content }}{%- else %}{%- for item in message.content %}{%- if item.type == 'text' and 'text' in item %}{{- item.text }}{%- elif item.type == 'image' %}{{- '<image>'}}{%- elif item.type == 'video' %}{{- '<video>'}}{%- else %}{{- raise_exception('Invalid content type. Supported types for system and user are text, image, video.')}}{%- endif %}{%- if not loop.last %}{{- '\n'}}{%- endif %}{%- endfor %}{%- endif %}{%- elif message.role == 'assistant' %}{%- set content = '' %}{%- if message.content is string %}{%- set content = message.content %}{%- else %}{%- set ns = namespace(content='') -%}{%- for item in message.content %}{%- if item.type == 'text' and 'text' in item %}{%- set ns.content = ns.content ~ item.text %}{%- else %}{{- raise_exception('Invalid content type. Supported type for assistant is text.')}}{%- endif %}{%- endfor %}{%- set content = ns.content -%}{%- endif %}{%- set content = content.split('</think>')[-1].lstrip('\n') %}{{- content }}{%- else %}{{- raise_exception('Invalid role. Supported roles are system, user, assistant.')}}{%- endif %}{{- '<|im_end|>\n'}}{%- endfor %}{%- if add_generation_prompt %}{{- '<|im_start|>assistant\n' }}{%- if enable_thinking is defined and enable_thinking is false %}{{- '<think>\n\n</think>\n\n' }}{%- endif %}{%- endif %}"
}
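
As a rough illustration of what this template produces (a sketch assuming it is picked up by the standard `apply_chat_template` machinery), a one-turn image+text conversation renders as:

```python
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "Describe the image."},
]}]
# tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# would yield, per the template above:
#
# <|im_start|>user
# <image>
# Describe the image.<|im_end|>
# <|im_start|>assistant
```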
config.json
ADDED
@@ -0,0 +1,105 @@
{
  "architectures": [
    "Ovis2_6_MoeForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "configuration_ovis2_6.Ovis2_6_Moe_Config",
    "AutoModelForCausalLM": "modeling_ovis2_6.Ovis2_6_MoeForCausalLM"
  },
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "conversation_formatter_class": "Qwen3ConversationFormatter",
  "decoder_sparse_step": 1,
  "eos_token_id": 151645,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 6144,
  "llm_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "Qwen/Qwen3-30B-A3B-Thinking-2507",
    "architectures": [
      "Qwen3MoeForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "decoder_sparse_step": 1,
    "eos_token_id": 151645,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "initializer_range": 0.02,
    "intermediate_size": 6144,
    "max_position_embeddings": 262144,
    "max_window_layers": 48,
    "mlp_only_layers": [],
    "model_type": "qwen3_moe",
    "moe_intermediate_size": 768,
    "norm_topk_prob": true,
    "num_attention_heads": 32,
    "num_experts": 128,
    "num_experts_per_tok": 8,
    "num_hidden_layers": 48,
    "num_key_value_heads": 4,
    "output_router_logits": false,
    "rms_norm_eps": 1e-06,
    "rope_scaling": null,
    "rope_theta": 10000000,
    "router_aux_loss_coef": 0.001,
    "sliding_window": null,
    "torch_dtype": "bfloat16",
    "use_cache": false,
    "use_sliding_window": false,
    "vocab_size": 151936
  },
  "max_position_embeddings": 262144,
  "max_window_layers": 48,
  "mlp_only_layers": [],
  "model_type": "ovis2_6_moe",
  "moe_intermediate_size": 768,
  "norm_topk_prob": true,
  "num_attention_heads": 32,
  "num_experts": 128,
  "num_experts_per_tok": 8,
  "num_hidden_layers": 48,
  "num_key_value_heads": 4,
  "output_router_logits": false,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000000,
  "router_aux_loss_coef": 0.001,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.3",
  "use_cache": false,
  "use_sliding_window": false,
  "visual_vocab_size": 65536,
  "vit_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "google/siglip2-so400m-patch16-512",
    "attention_dropout": 0.0,
    "fullatt_block_indexes": null,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "hidden_stride": 2,
    "image_size": 512,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_type": "siglip2_navit",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 27,
    "num_patches": -1,
    "patch_size": 16,
    "preserve_original_pe": true,
    "temporal_patch_size": 1,
    "torch_dtype": "bfloat16",
    "use_rope": true,
    "window_size": 112
  },
  "vocab_size": 151936
}
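
For orientation, the nested layout above (Qwen3-MoE language tower plus SigLIP2-NaViT vision tower) can be inspected programmatically. A minimal sketch (our own, assuming the standard `AutoConfig` + `trust_remote_code` path wired up by `auto_map`):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("AIDC-AI/Ovis2.6-30B-A3B", trust_remote_code=True)
print(cfg.model_type)               # ovis2_6_moe
print(cfg.llm_config.num_experts)   # 128 experts, 8 active per token
print(cfg.vit_config.image_size)    # 512, with patch_size 16
```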
configuration_ovis2_6.py
ADDED
@@ -0,0 +1,147 @@
from typing import Any, Optional, Union

from transformers import Qwen3Config, Qwen3MoeConfig, Qwen3NextConfig
from transformers.configuration_utils import PretrainedConfig

__all__ = ["Siglip2NavitConfig", "Ovis2_6_Config", "Ovis2_6_Moe_Config", "Ovis2_6_Next_Config"]


class Siglip2NavitConfig(PretrainedConfig):
    """Configuration class for the Siglip2-NaViT vision tower used by Ovis2.6.

    Args:
        hidden_size: Dimension of the hidden representations.
        intermediate_size: Dimension of the MLP representations.
        num_hidden_layers: Number of hidden layers in the Transformer.
        num_attention_heads: Number of attention heads for each attention layer
            in the Transformer.
        num_channels: Number of input channels.
        num_patches: Number of patches for NaFlex-style linear patch embedding;
            -1 falls back to convolutional patch embedding.
        image_size: Image size.
        patch_size: Patch size.
        hidden_act: Activation function used in the MLP.
        layer_norm_eps: Epsilon value used for the layer normalization.
        attention_dropout: Dropout ratio for attention probabilities.
        hidden_stride: Spatial merge stride applied on top of the patch grid.
        window_size: Window size for windowed attention.
        fullatt_block_indexes: Indexes of blocks that use full instead of windowed attention.
        temporal_patch_size: Temporal patch size for video inputs.
        preserve_original_pe: Whether to keep and interpolate the original position embeddings.
        use_rope: Whether to use rotary position embeddings.
        kwargs: Keyword arguments for the [`PretrainedConfig`].
    """

    model_type: str = "siglip2_navit"

    def __init__(
        self,
        hidden_size: int = 1024,
        intermediate_size: int = 4096,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 16,
        num_channels: int = 3,
        num_patches: int = -1,
        image_size: int = 512,
        patch_size: int = 16,
        hidden_act: str = "gelu_pytorch_tanh",
        layer_norm_eps: float = 1e-6,
        attention_dropout: float = 0.0,
        hidden_stride: int = 2,
        window_size: int = 112,
        fullatt_block_indexes: Optional[list] = None,
        temporal_patch_size: int = 1,
        preserve_original_pe: bool = True,
        use_rope: bool = True,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.patch_size = patch_size
        self.image_size = image_size
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_stride = hidden_stride
        self.window_size = window_size
        self.fullatt_block_indexes = fullatt_block_indexes
        self.temporal_patch_size = temporal_patch_size
        self.preserve_original_pe = preserve_original_pe
        self.use_rope = use_rope


class Ovis2_6_Config(PretrainedConfig):
    model_type = "ovis2_6"
    sub_configs = dict(llm_config=Qwen3Config, vit_config=Siglip2NavitConfig)

    def __init__(self,
                 llm_config: Optional[Union[Qwen3Config, dict]] = None,
                 vit_config: Optional[Union[Siglip2NavitConfig, dict]] = None,
                 visual_vocab_size=65536,
                 hidden_size=None,
                 **kwargs
                 ):
        super().__init__(**kwargs)
        if isinstance(llm_config, dict):
            llm_config = Qwen3Config(**llm_config)
        self.llm_config = llm_config
        if isinstance(vit_config, dict):
            vit_config = Siglip2NavitConfig(**vit_config)
        self.vit_config = vit_config
        self.visual_vocab_size = visual_vocab_size
        self.hidden_size = hidden_size
        if kwargs.get('attn_implementation'):
            self.llm_config._attn_implementation = kwargs['attn_implementation']
            self.vit_config._attn_implementation = kwargs['attn_implementation']


class Ovis2_6_Moe_Config(PretrainedConfig):
    model_type = "ovis2_6_moe"
    sub_configs = dict(llm_config=Qwen3MoeConfig, vit_config=Siglip2NavitConfig)

    def __init__(self,
                 llm_config: Optional[Union[Qwen3MoeConfig, dict]] = None,
                 vit_config: Optional[Union[Siglip2NavitConfig, dict]] = None,
                 visual_vocab_size=65536,
                 hidden_size=None,
                 **kwargs
                 ):
        super().__init__(**kwargs)
        if isinstance(llm_config, dict):
            llm_config = Qwen3MoeConfig(**llm_config)
        self.llm_config = llm_config
        if isinstance(vit_config, dict):
            vit_config = Siglip2NavitConfig(**vit_config)
        self.vit_config = vit_config
        self.visual_vocab_size = visual_vocab_size
        self.hidden_size = hidden_size
        if kwargs.get('attn_implementation'):
            self.llm_config._attn_implementation = kwargs['attn_implementation']
            self.vit_config._attn_implementation = kwargs['attn_implementation']


class Ovis2_6_Next_Config(PretrainedConfig):
    model_type = "ovis2_6_next"
    sub_configs = dict(llm_config=Qwen3NextConfig, vit_config=Siglip2NavitConfig)

    def __init__(self,
                 llm_config: Optional[Union[Qwen3NextConfig, dict]] = None,
                 vit_config: Optional[Union[Siglip2NavitConfig, dict]] = None,
                 visual_vocab_size=65536,
                 hidden_size=None,
                 **kwargs
                 ):
        super().__init__(**kwargs)
        if isinstance(llm_config, dict):
            llm_config = Qwen3NextConfig(**llm_config)
        self.llm_config = llm_config
        if isinstance(vit_config, dict):
            vit_config = Siglip2NavitConfig(**vit_config)
        self.vit_config = vit_config
        self.visual_vocab_size = visual_vocab_size
        self.hidden_size = hidden_size
        if kwargs.get('attn_implementation'):
            self.llm_config._attn_implementation = kwargs['attn_implementation']
            self.vit_config._attn_implementation = kwargs['attn_implementation']
generation_config.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "multimodal_max_length": 8192,
  "pad_token_id": 151643,
  "repetition_penalty": 1.05,
  "temperature": 0.6,
  "top_k": 20,
  "top_p": 0.95,
  "transformers_version": "4.57.0"
}
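
These sampling defaults (temperature 0.6, top-p 0.95, top-k 20, repetition penalty 1.05) are applied automatically by `generate`. As a small illustration (our own sketch, using standard Hugging Face `generate` keyword arguments), any of them can be overridden per call:

```python
# Per-call kwargs override generation_config.json defaults (standard HF behavior).
outputs = model.generate(
    inputs=input_ids,
    max_new_tokens=512,
    do_sample=False,          # e.g., greedy decoding instead of the sampled default
    repetition_penalty=1.0,   # disable the 1.05 default for this call
)
```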
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1138505c6cff23645fce86c881e28def57ef9f7cd470567f1a2ddb7376d81c29
size 4997190480
model-00002-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:947e44619497ed27b2df599d265469a2bb7638b7adbf4697f24f0bfed41afc89
size 4997747912
model-00003-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d591e576b2cdf422682b1d700f85c6cc08957382394215ce7d5895e48cd1664
size 4997748512
model-00004-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b69ccc665d62e288812c983be1dd12565a286328e7d915dbe4b1828587e0341b
size 4997749488
model-00005-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8619e6e516ede3deae193df2ea5ce42cf448a5df88a0ad10c5d8c53a41d4f648
size 4997749488
model-00006-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:566a33a4221f6f4a0f9841871d5d18f48613eb2583a58ba6b3ac918043c747d0
size 4997749488
model-00007-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd7fa49a4a9dace627d407905bdfb172412309be11f9630190c0446906f18209
size 4997749488
model-00008-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55d2baad86542b971ba39be06da72e57586b410cfbb368d10243007843fb973f
size 4997749488
model-00009-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b8955c0f3ef01bdd7e3d109fcd988f6c738559547f7d086bca1c53c03288944
size 4997749488
model-00010-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e6e09ed39cb2581816cabc1c1121a6b7775e038bac5b455a3cfcf240961a3b8
size 4997749488
model-00011-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:34c1eb4a15fd678df40bd3be9bdc7a2d692c42fda19f24ad4898b3fa33ae1ecc
size 4997749488
model-00012-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fc4475054271e6e0ae50ee280275fdad817eb40c5a4ed500528440e37cc7beb
size 4997749488
model-00013-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a69c34b246a1a6031692cd95414f8bea6a158c38514bd801fa460d0918acb48
size 2793994576
model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
modeling_ovis2_6.py
ADDED
@@ -0,0 +1,1458 @@
| 1 |
+
import math
|
| 2 |
+
from typing import Dict, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import PIL.Image
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from torch import Tensor, nn
|
| 8 |
+
from torch.nn import functional as F
|
| 9 |
+
from transformers import (
|
| 10 |
+
AutoConfig,
|
| 11 |
+
AutoImageProcessor,
|
| 12 |
+
AutoModel,
|
| 13 |
+
AutoModelForCausalLM,
|
| 14 |
+
AutoTokenizer,
|
| 15 |
+
)
|
| 16 |
+
from transformers.activations import ACT2FN
|
| 17 |
+
from transformers.generation.utils import GenerateOutput
|
| 18 |
+
from transformers.modeling_outputs import BaseModelOutputWithNoAttention
|
| 19 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 20 |
+
from transformers.utils import is_flash_attn_2_available
|
| 21 |
+
|
| 22 |
+
from .configuration_ovis2_6 import Siglip2NavitConfig, Ovis2_6_Config, Ovis2_6_Moe_Config, Ovis2_6_Next_Config
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
if is_flash_attn_2_available():
|
| 26 |
+
from flash_attn import flash_attn_varlen_func
|
| 27 |
+
from flash_attn.layers.rotary import apply_rotary_emb
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
IMAGE_PLACEHOLDER = "<image>"
|
| 31 |
+
IMAGE_PLACEHOLDER_ID = -200
|
| 32 |
+
VIDEO_PLACEHOLDER = "<video>"
|
| 33 |
+
VIDEO_PLACEHOLDER_ID = -201
|
| 34 |
+
|
| 35 |
+
VISUAL_ATOM_ID = -300
|
| 36 |
+
INDICATOR_IDS = [-301, -302, -303, -304]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# copied from qwen2.5-vl
|
| 40 |
+
class VisionRotaryEmbedding(nn.Module):
|
| 41 |
+
def __init__(self, dim: int, theta: float = 10000.0) -> None:
|
| 42 |
+
super().__init__()
|
| 43 |
+
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
|
| 44 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 45 |
+
|
| 46 |
+
def forward(self, seqlen: int) -> torch.Tensor:
|
| 47 |
+
seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
|
| 48 |
+
freqs = torch.outer(seq, self.inv_freq)
|
| 49 |
+
return freqs
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class Siglip2VisionEmbeddings(nn.Module):
|
| 53 |
+
def __init__(self, config: Siglip2NavitConfig):
|
| 54 |
+
super().__init__()
|
| 55 |
+
self.config = config
|
| 56 |
+
self.embed_dim = config.hidden_size
|
| 57 |
+
self.patch_size = config.patch_size
|
| 58 |
+
self.image_size = config.image_size
|
| 59 |
+
self.num_patches = config.num_patches
|
| 60 |
+
self.preserve_original_pe = config.preserve_original_pe
|
| 61 |
+
self.hidden_stride = config.hidden_stride
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# siglip2 naflex
|
| 65 |
+
if self.num_patches > 0:
|
| 66 |
+
self.patch_embedding = nn.Linear(
|
| 67 |
+
in_features=config.num_channels * self.patch_size * self.patch_size,
|
| 68 |
+
out_features=self.embed_dim,
|
| 69 |
+
)
|
| 70 |
+
if self.preserve_original_pe:
|
| 71 |
+
self.position_embedding_size = int(self.num_patches**0.5)
|
| 72 |
+
self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)
|
| 73 |
+
|
| 74 |
+
else:
|
| 75 |
+
self.patch_embedding = nn.Conv2d(
|
| 76 |
+
in_channels=config.num_channels,
|
| 77 |
+
out_channels=self.embed_dim,
|
| 78 |
+
kernel_size=self.patch_size,
|
| 79 |
+
stride=self.patch_size,
|
| 80 |
+
padding="valid",
|
| 81 |
+
)
|
| 82 |
+
if self.preserve_original_pe:
|
| 83 |
+
self.num_patches = (self.image_size // self.patch_size) ** 2
|
| 84 |
+
self.position_embedding_size = self.image_size // self.patch_size
|
| 85 |
+
self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)
|
| 86 |
+
|
| 87 |
+
@staticmethod
|
| 88 |
+
def resize_positional_embeddings(
|
| 89 |
+
positional_embeddings: torch.Tensor,
|
| 90 |
+
spatial_shapes: torch.LongTensor,
|
| 91 |
+
max_length: int,
|
| 92 |
+
) -> torch.Tensor:
|
| 93 |
+
"""
|
| 94 |
+
Resize positional embeddings to image-specific size and pad to a fixed size.
|
| 95 |
+
Args:
|
| 96 |
+
positional_embeddings (`torch.Tensor`):
|
| 97 |
+
Position embeddings of shape (height, width, embed_dim)
|
| 98 |
+
spatial_shapes (`torch.LongTensor`):
|
| 99 |
+
Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
|
| 100 |
+
max_length (`int`):
|
| 101 |
+
Maximum length of the positional embeddings to pad resized positional embeddings to
|
| 102 |
+
Returns:
|
| 103 |
+
`torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
|
| 104 |
+
"""
|
| 105 |
+
batch_size = spatial_shapes.shape[0]
|
| 106 |
+
embed_dim = positional_embeddings.shape[-1]
|
| 107 |
+
source_dtype = positional_embeddings.dtype
|
| 108 |
+
|
| 109 |
+
resulted_positional_embeddings = torch.empty(
|
| 110 |
+
(batch_size, max_length, embed_dim),
|
| 111 |
+
device=positional_embeddings.device,
|
| 112 |
+
dtype=source_dtype,
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
# (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
|
| 116 |
+
positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)
|
| 117 |
+
|
| 118 |
+
# Upcast to float32 on CPU because antialias is not supported for bfloat16/float16 on CPU
|
| 119 |
+
if positional_embeddings.device.type == "cpu":
|
| 120 |
+
positional_embeddings = positional_embeddings.to(torch.float32)
|
| 121 |
+
|
| 122 |
+
for i in range(batch_size):
|
| 123 |
+
# (1, dim, height, width) -> (1, dim, target_height, target_width)
|
| 124 |
+
height, width = spatial_shapes[i]
|
| 125 |
+
resized_embeddings = F.interpolate(
|
| 126 |
+
positional_embeddings,
|
| 127 |
+
size=(height, width),
|
| 128 |
+
mode="bilinear",
|
| 129 |
+
align_corners=False,
|
| 130 |
+
antialias=True,
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
# (1, dim, target_height, target_width) -> (target_height * target_width, dim)
|
| 134 |
+
resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)
|
| 135 |
+
|
| 136 |
+
# Cast to original dtype
|
| 137 |
+
resized_embeddings = resized_embeddings.to(source_dtype)
|
| 138 |
+
|
| 139 |
+
resulted_positional_embeddings[i, : height * width] = resized_embeddings
|
| 140 |
+
resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]
|
| 141 |
+
|
| 142 |
+
return resulted_positional_embeddings
|
| 143 |
+
|
| 144 |
+
def forward(self, pixel_values: torch.FloatTensor,
|
| 145 |
+
grid_thws: Optional[torch.LongTensor] = None) -> torch.Tensor:
|
| 146 |
+
"""
|
| 147 |
+
Args:
|
| 148 |
+
pixel_values (`torch.FloatTensor`):
|
| 149 |
+
Pixel values of shape (num_patches, num_channels * temporal_patch_size * patch_size * patch_size)
|
| 150 |
+
grid_thws: (`torch.LongTensor`):
|
| 151 |
+
grid shape (num_patches, 3)
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
# Apply patch embeddings to already patchified pixel values
|
| 155 |
+
target_dtype = self.patch_embedding.weight.dtype
|
| 156 |
+
if isinstance(self.patch_embedding, nn.Linear):
|
| 157 |
+
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
|
| 158 |
+
elif isinstance(self.patch_embedding, nn.Conv2d):
|
| 159 |
+
pixel_values = pixel_values.view(-1, self.config.num_channels * self.config.temporal_patch_size, self.patch_size,
|
| 160 |
+
self.patch_size)
|
| 161 |
+
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
|
| 162 |
+
patch_embeds = patch_embeds.reshape(-1, self.embed_dim)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
if self.preserve_original_pe:
|
| 166 |
+
assert grid_thws is not None
|
| 167 |
+
pos_embed_new = torch.zeros_like(patch_embeds)
|
| 168 |
+
ori_h = ori_w = self.position_embedding_size
|
| 169 |
+
positional_embeddings = self.position_embedding.weight.reshape(
|
| 170 |
+
self.position_embedding_size, self.position_embedding_size, -1
|
| 171 |
+
).unsqueeze(0).permute(0,3,1,2)
|
| 172 |
+
# pos_embed = self.pos_embed.reshape(1, ori_h, ori_w, -1).permute(0, 3, 1, 2)
|
| 173 |
+
cnt = 0
|
| 174 |
+
for t, h, w in grid_thws:
|
| 175 |
+
thw = t * h * w
|
| 176 |
+
pe = F.interpolate(positional_embeddings, size=(h, w), mode='bicubic', align_corners=False)
|
| 177 |
+
pe = pe.permute(0, 2, 3, 1).reshape(1, h * w, -1)
|
| 178 |
+
pe = pe[0].repeat(t, 1)
|
| 179 |
+
pe = pe.reshape(t, h // self.hidden_stride, self.hidden_stride, w // self.hidden_stride,
|
| 180 |
+
self.hidden_stride, -1)
|
| 181 |
+
pe = pe.permute(0, 1, 3, 2, 4, 5).reshape(thw, -1)
|
| 182 |
+
pos_embed_new[cnt:cnt + thw] = pe
|
| 183 |
+
cnt += thw
|
| 184 |
+
patch_embeds = patch_embeds + pos_embed_new
|
| 185 |
+
|
| 186 |
+
return patch_embeds
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
# copied from qwen2.5-vl
def apply_rotary_pos_emb_flashatt(
    q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    cos = cos.chunk(2, dim=-1)[0].contiguous()
    sin = sin.chunk(2, dim=-1)[0].contiguous()
    q_embed = apply_rotary_emb(q.float(), cos.float(), sin.float()).type_as(q)
    k_embed = apply_rotary_emb(k.float(), cos.float(), sin.float()).type_as(k)
    return q_embed, k_embed


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb_vision(
    q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    orig_q_dtype = q.dtype
    orig_k_dtype = k.dtype
    q, k = q.float(), k.float()
    cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    q_embed = q_embed.to(orig_q_dtype)
    k_embed = k_embed.to(orig_k_dtype)
    return q_embed, k_embed

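# The two RoPE helpers above are interchangeable: the flash-attn variant feeds
# half-width cos/sin tables to flash_attn's apply_rotary_emb (e.g. for
# head_dim=64 it keeps only the first 32 columns of the duplicated tables built
# in Siglip2Encoder.forward), while the eager variant applies
# q * cos + rotate_half(q) * sin on the full width.
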
class Siglip2Attention(nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

        self.use_rope = config.use_rope

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> torch.Tensor:
        """Input shape: (total_seq_len, embed_dim) -- sequences are packed, with
        boundaries given by `cu_seqlens`."""

        seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(seq_length, self.num_heads, self.head_dim)
        keys = keys.view(seq_length, self.num_heads, self.head_dim)
        values = values.view(seq_length, self.num_heads, self.head_dim)

        if self.use_rope:
            cos, sin = position_embeddings
            if is_flash_attn_2_available():
                queries, keys = apply_rotary_pos_emb_flashatt(queries.unsqueeze(0), keys.unsqueeze(0), cos, sin)
            else:
                queries, keys = apply_rotary_pos_emb_vision(queries.unsqueeze(0), keys.unsqueeze(0), cos, sin)
            queries = queries.squeeze(0)
            keys = keys.squeeze(0)

        if is_flash_attn_2_available():
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
            attn_output = flash_attn_varlen_func(
                queries, keys, values, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen
            ).reshape(seq_length, -1)
        else:
            # Fallback: run scaled-dot-product attention on each packed sequence.
            batch_size = cu_seqlens.shape[0] - 1
            outputs = []
            cu = cu_seqlens.tolist()
            for i in range(batch_size):
                start_idx = cu[i]
                end_idx = cu[i + 1]
                # Each sequence is processed independently.
                q_i = queries[start_idx:end_idx].unsqueeze(0)
                k_i = keys[start_idx:end_idx].unsqueeze(0)
                v_i = values[start_idx:end_idx].unsqueeze(0)
                # (1, seq_len, num_heads, head_dim) -> (1, num_heads, seq_len, head_dim)
                q_i, k_i, v_i = [x.transpose(1, 2) for x in (q_i, k_i, v_i)]
                output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0)
                # (1, num_heads, seq_len, head_dim) -> (seq_len, embed_dim)
                output_i = output_i.transpose(1, 2).reshape(-1, self.embed_dim)
                outputs.append(output_i)
            attn_output = torch.cat(outputs, dim=0)

        attn_output = self.out_proj(attn_output)
        return attn_output

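# Packing convention used by the attention class above (illustrative):
# cu_seqlens = [0, 4, 10] packs two sequences of lengths 4 and 6 into one
# (10, num_heads, head_dim) tensor; flash_attn_varlen_func consumes the offsets
# directly, and the SDPA fallback slices each [start:end] span and attends to
# it independently.
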
class Siglip2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states

class Siglip2EncoderLayer(nn.Module):
    def __init__(self, config: Siglip2NavitConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.self_attn = Siglip2Attention(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        position_embeddings: torch.Tensor
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Packed input to the layer of shape `(total_seq_len, embed_dim)`.
            cu_seqlens (`torch.Tensor`):
                Cumulative sequence lengths delimiting the packed sequences.
            position_embeddings (`Tuple[torch.Tensor, torch.Tensor]`):
                Precomputed (cos, sin) rotary tables matching the packing order.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            cu_seqlens=cu_seqlens,
            position_embeddings=position_embeddings
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states

class Siglip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
    [`Siglip2EncoderLayer`].

    Args:
        config: Siglip2NavitConfig
    """

    def __init__(self, config: Siglip2NavitConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Siglip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

        self.rotary_pos_emb = VisionRotaryEmbedding(config.hidden_size // config.num_attention_heads // 2)
        self.patch_size = config.patch_size
        self.hidden_stride = config.hidden_stride
        self.window_size = config.window_size
        self.spatial_merge_unit = config.hidden_stride * config.hidden_stride
        # `fullatt_block_indexes` is a '|'-separated string of layer indices that
        # use full attention; all other layers attend within local windows.
        self.fullatt_block_indexes = (
            None if config.fullatt_block_indexes is None
            else [int(i) for i in config.fullatt_block_indexes.split('|')]
        )

    # copied from qwen2.5_vl
    def rot_pos_emb(self, grid_thw):
        pos_ids = []
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            hpos_ids = hpos_ids.reshape(
                h // self.hidden_stride,
                self.hidden_stride,
                w // self.hidden_stride,
                self.hidden_stride,
            )
            hpos_ids = hpos_ids.permute(0, 2, 1, 3)
            hpos_ids = hpos_ids.flatten()

            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            wpos_ids = wpos_ids.reshape(
                h // self.hidden_stride,
                self.hidden_stride,
                w // self.hidden_stride,
                self.hidden_stride,
            )
            wpos_ids = wpos_ids.permute(0, 2, 1, 3)
            wpos_ids = wpos_ids.flatten()
            pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        max_grid_size = grid_thw[:, 1:].max()
        rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        return rotary_pos_emb

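    # Example of the id layout above (assuming hidden_stride=2): for a 4x4
    # patch grid the (h, w) ids come out as (0,0), (0,1), (1,0), (1,1),
    # (0,2), (0,3), (1,2), (1,3), ... -- each 2x2 merge group is contiguous,
    # matching the order in which patch embeddings are packed.
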
    def get_window_index(self, grid_thw):
        window_index: list = []
        cu_window_seqlens: list = [0]
        window_index_id = 0
        # Number of merged tokens along each side of an attention window.
        vit_merger_window_size = self.window_size // self.hidden_stride // self.patch_size

        for grid_t, grid_h, grid_w in grid_thw:
            llm_grid_h, llm_grid_w = (
                grid_h // self.hidden_stride,  # number of patches after merging
                grid_w // self.hidden_stride,
            )
            index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w)
            pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
            pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
            num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
            num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
            index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
            index_padded = index_padded.reshape(
                grid_t,
                num_windows_h,
                vit_merger_window_size,
                num_windows_w,
                vit_merger_window_size,
            )
            index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
                grid_t,
                num_windows_h * num_windows_w,
                vit_merger_window_size,
                vit_merger_window_size,
            )
            seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
            index_padded = index_padded.reshape(-1)
            index_new = index_padded[index_padded != -100]
            window_index.append(index_new + window_index_id)
            cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1]
            cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
            window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()
        window_index = torch.cat(window_index, dim=0)

        return window_index, cu_window_seqlens

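    # Worked example (assumed config: window_size=112, patch_size=14,
    # hidden_stride=2): vit_merger_window_size = 112 // 2 // 14 = 4, so each
    # window covers a 4x4 grid of merged tokens (8x8 raw patches). A 12x20
    # merged grid is padded to 16x24 and split into 4*6 = 24 windows; the 15
    # windows covering real tokens each contribute 16 * spatial_merge_unit
    # entries to cu_window_seqlens, while the all-padding windows yield
    # zero-length runs that torch.unique_consecutive later removes.
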
    # Ignore copy
    def forward(
        self,
        inputs_embeds,
        grid_thws: torch.Tensor,
        output_hidden_states: bool = False,
    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, ...]]]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(total_seq_len, hidden_size)`):
                Packed patch embeddings for all images/clips in the batch.
            grid_thws (`torch.Tensor` of shape `(num_images, 3)`):
                The (t, h, w) patch grid of each image or video clip.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether to also return the hidden states of all layers, restored
                to the original (un-windowed) token order.
        """
        rotary_pos_emb = self.rot_pos_emb(grid_thws)
        window_index, cu_window_seqlens = self.get_window_index(grid_thws)
        cu_window_seqlens = torch.tensor(
            cu_window_seqlens,
            device=inputs_embeds.device,
            dtype=grid_thws.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)

        seq_len, _ = inputs_embeds.size()
        # Reorder tokens into window order, one spatial_merge_unit group at a time.
        inputs_embeds = inputs_embeds.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        inputs_embeds = inputs_embeds[window_index, :, :]
        inputs_embeds = inputs_embeds.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        rotary_pos_emb = rotary_pos_emb[window_index, :, :]
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())

        cu_seqlens = torch.repeat_interleave(grid_thws[:, 1] * grid_thws[:, 2], grid_thws[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thws.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

        reverse_indices = torch.argsort(window_index)
        encoder_states = () if output_hidden_states else None

        hidden_states = inputs_embeds
        for index, block in enumerate(self.layers):
            if self.fullatt_block_indexes is None or index in self.fullatt_block_indexes:
                cu_seqlens_tmp = cu_seqlens
            else:
                cu_seqlens_tmp = cu_window_seqlens
            if self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(
                    block.__call__, hidden_states, cu_seqlens_tmp, position_embeddings
                )
            else:
                hidden_states = block(hidden_states, cu_seqlens_tmp, position_embeddings)
            if output_hidden_states:
                hidden_states_ = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
                encoder_states += (hidden_states_[reverse_indices, :].reshape(seq_len, -1),)
        # Restore the original (un-windowed) token order.
        hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        hidden_states = hidden_states[reverse_indices, :].reshape(seq_len, -1)

        return hidden_states, encoder_states

class Siglip2VisionTransformer(nn.Module):
    def __init__(self, config: Siglip2NavitConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = Siglip2VisionEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        grid_thws: torch.LongTensor,
        output_hidden_states: Optional[bool] = True,
        return_dict: Optional[bool] = True,
    ) -> Union[
        Tuple[torch.Tensor],
        Tuple[torch.Tensor, Tuple[torch.Tensor, ...]],
        BaseModelOutputWithNoAttention,
    ]:
        r"""
        grid_thws (`torch.LongTensor` of shape `(num_images, 3)`):
            Tensor containing the (t, h, w) patch-grid dimensions of the inputs.
        """
        hidden_states = self.embeddings(pixel_values, grid_thws)

        last_hidden_state, hidden_states = self.encoder(hidden_states, grid_thws, output_hidden_states)
        last_hidden_state = self.post_layernorm(last_hidden_state)

        if not return_dict:
            output = (last_hidden_state,)
            output += (hidden_states,) if output_hidden_states else ()
            return output

        return BaseModelOutputWithNoAttention(
            last_hidden_state=last_hidden_state,
            hidden_states=hidden_states
        )

class Siglip2PreTrainedModel(PreTrainedModel):
    config_class = Siglip2NavitConfig
    base_model_prefix = "siglip2_navit"
    supports_gradient_checkpointing = True

    _no_split_modules = [
        "Siglip2VisionEmbeddings",
        "Siglip2EncoderLayer",
    ]
    _supports_flash_attn_2 = True
    _supports_sdpa = False
    _supports_flex_attn = False
    _supports_attention_backend = True


class Siglip2NavitModel(Siglip2PreTrainedModel):
    config_class = Siglip2NavitConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Siglip2NavitConfig):
        super().__init__(config)

        self.vision_model = Siglip2VisionTransformer(config)

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        grid_thws: torch.LongTensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[
        Tuple[torch.Tensor],
        Tuple[torch.Tensor, Tuple[torch.Tensor, ...]],
        BaseModelOutputWithNoAttention,
    ]:
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            grid_thws=grid_thws,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

class VisualEmbedding(torch.nn.Embedding):
    """
    A visual embedding layer that can handle both discrete token IDs (long) and continuous
    soft-token probabilities (float).
    """

    def forward(self, visual_tokens: Tensor) -> Tensor:
        if visual_tokens.dtype in [torch.int8, torch.int16, torch.int32, torch.int64, torch.long]:
            return super().forward(visual_tokens)
        # Handle soft tokens (probabilities) by matrix multiplication with the embedding weight.
        return torch.matmul(visual_tokens, self.weight)

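# Soft-token lookup sketch: if `probs` has shape (n, vocab) with rows summing
# to 1 (e.g. a softmax output), VisualEmbedding returns probs @ weight -- a
# convex combination of embedding rows -- while integer ids fall back to the
# ordinary nn.Embedding lookup.
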
class VisualTokenizer(torch.nn.Module):
    """
    Tokenizes images or videos into a sequence of continuous visual tokens.
    """

    def __init__(self, vit, visual_vocab_size, image_processor_name_or_path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vit = vit
        self.image_processor = AutoImageProcessor.from_pretrained(image_processor_name_or_path, do_center_crop=False)
        head_dim = visual_vocab_size - len(INDICATOR_IDS)
        self.head = torch.nn.Sequential(
            torch.nn.Linear(self.vit.config.hidden_size * self.vit.config.hidden_stride ** 2, head_dim, bias=False),
            torch.nn.LayerNorm(head_dim)
        )

    def _encode(self, pixel_values, grid_thws):
        output = self.vit(pixel_values, grid_thws, output_hidden_states=True, return_dict=True)
        features = output.hidden_states[-1]
        seq_len, _ = features.shape
        # Merge each hidden_stride**2 group of patch features into one visual token.
        features = features.reshape(seq_len // (self.vit.config.hidden_stride ** 2), -1)
        return features

    # Adapted from qwen2_vl
    @staticmethod
    def smart_resize(
        height: int, width: int, factor: int = 28, min_pixels: int = 448 * 448, max_pixels: int = 1344 * 1792
    ):
        """Rescales the image so that the following conditions are met:
        1. Both dimensions are divisible by `factor`.
        2. The total number of pixels is within [`min_pixels`, `max_pixels`].
        3. The aspect ratio is maintained as closely as possible.
        """
        if height < factor or width < factor:
            # Upscale so the smaller side reaches `factor`.
            if height < width:
                width = round(factor / height * width)
                height = factor
            else:
                height = round(factor / width * height)
                width = factor

        elif max(height, width) / min(height, width) > 200:
            # Clamp extreme aspect ratios to 200:1.
            if height > width:
                height = 200 * width
            else:
                width = 200 * height

        h_bar = round(height / factor) * factor
        w_bar = round(width / factor) * factor
        if h_bar * w_bar > max_pixels:
            beta = math.sqrt((height * width) / max_pixels)
            h_bar = math.floor(height / beta / factor) * factor
            w_bar = math.floor(width / beta / factor) * factor
        elif h_bar * w_bar < min_pixels:
            beta = math.sqrt(min_pixels / (height * width))
            h_bar = math.ceil(height * beta / factor) * factor
            w_bar = math.ceil(width * beta / factor) * factor
        return h_bar, w_bar

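    # Worked example for smart_resize (illustrative): height=1000, width=700,
    # factor=28 (e.g. patch_size 14 * hidden_stride 2):
    #   h_bar = round(1000 / 28) * 28 = 1008, w_bar = round(700 / 28) * 28 = 700
    #   1008 * 700 = 705600 pixels lies within [448*448, 1344*1792], so no
    #   further scaling is needed and the ~10:7 aspect ratio is preserved.
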
    def preprocess(
        self,
        image: Optional[PIL.Image.Image] = None,
        video: Optional[List[PIL.Image.Image]] = None,
        min_pixels: Optional[int] = None,
        max_pixels: Optional[int] = None
    ):
        patch_size = self.vit.config.patch_size
        temporal_patch_size = self.vit.config.temporal_patch_size
        hidden_stride = self.vit.config.hidden_stride
        assert (image is None) ^ (video is None), "Invalid input: expect either image or video"
        if image is not None:
            images = [image]
        else:
            images = video
        images = [image.convert("RGB") if image.mode != 'RGB' else image for image in images]
        width, height = images[0].size
        processed_images = []
        for image in images:
            resized_height, resized_width = self.smart_resize(
                height,
                width,
                factor=patch_size * hidden_stride,
                min_pixels=min_pixels,
                max_pixels=max_pixels,
            )
            new_size = dict(height=resized_height, width=resized_width)
            new_image = self.image_processor.preprocess(image, size=new_size, return_tensors="np")['pixel_values'][0]
            processed_images.append(new_image)

        patches = np.array(processed_images)
        if patches.shape[0] % temporal_patch_size != 0:
            # Pad by repeating the last frame until the frame count is a
            # multiple of temporal_patch_size (equivalent to the original
            # `temporal_patch_size - 1` repeats when temporal_patch_size == 2).
            num_repeats = temporal_patch_size - patches.shape[0] % temporal_patch_size
            repeats = np.repeat(patches[-1][np.newaxis], num_repeats, axis=0)
            patches = np.concatenate([patches, repeats], axis=0)
        channel = patches.shape[1]
        grid_t = patches.shape[0] // temporal_patch_size
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        grid_thw = torch.tensor([[grid_t, grid_h, grid_w]])

        patches = patches.reshape(
            grid_t, temporal_patch_size, channel,
            grid_h // hidden_stride, hidden_stride, patch_size,
            grid_w // hidden_stride, hidden_stride, patch_size,
        )
        patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
        flatten_patches = patches.reshape(
            grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size
        )
        flatten_patches = torch.tensor(flatten_patches)

        return flatten_patches, grid_thw

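    # Layout note for preprocess: the transpose above orders patch axes as
    # (t, h_block, w_block, h_stride, w_stride, channel, temporal, ph, pw), so
    # each hidden_stride x hidden_stride group of spatially adjacent patches is
    # stored contiguously -- the same order the ViT's positional embeddings and
    # window partitioning assume.
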
    def forward(
        self, pixel_values, grid_thws
    ) -> torch.Tensor:  # [BatchSize, ImageShape] -> [BatchSize, #Token, VocabSize]
        features = self._encode(pixel_values, grid_thws)
        logits = self.head(features)
        tokens = torch.softmax(logits, dim=-1, dtype=torch.float32).to(logits.dtype)

        token_len, _ = tokens.shape
        # Append zero-probability columns for the indicator tokens, which are
        # never produced by the visual head itself.
        padding_tensor = torch.zeros(size=(token_len, len(INDICATOR_IDS)),
                                     dtype=tokens.dtype,
                                     device=tokens.device,
                                     layout=tokens.layout,
                                     requires_grad=False)
        tokens = torch.cat((tokens, padding_tensor), dim=1)
        return tokens


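# The visual tokenizer emits "soft" tokens: each output row is a probability
# distribution over the visual vocabulary. The indicator columns are
# zero-padded because indicator embeddings are injected separately in
# merge_multimodal rather than predicted by the visual head.
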
class Ovis2_6_PreTrainedModel(PreTrainedModel):
    config_class = Ovis2_6_Config
    base_model_prefix = "ovis2_6"


class Ovis2_6ForCausalLM(Ovis2_6_PreTrainedModel):
    _supports_flash_attn_2 = True

    def __init__(self, config: Ovis2_6_Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.llm = AutoModelForCausalLM.from_config(self.config.llm_config)
        assert self.config.hidden_size == self.llm.config.hidden_size, "hidden size mismatch"
        self.text_tokenizer = AutoTokenizer.from_pretrained(self.config.name_or_path)
        self.visual_tokenizer = VisualTokenizer(vit=AutoModel.from_config(self.config.vit_config),
                                                visual_vocab_size=self.config.visual_vocab_size,
                                                image_processor_name_or_path=self.config.name_or_path)

        self.vte = VisualEmbedding(self.config.visual_vocab_size, self.config.hidden_size,
                                   device=self.visual_tokenizer.vit.device, dtype=self.visual_tokenizer.vit.dtype)
        indicator_token_indices = torch.arange(
            self.config.visual_vocab_size - len(INDICATOR_IDS),
            self.config.visual_vocab_size,
            dtype=torch.long
        )
        self.register_buffer("indicator_token_indices", indicator_token_indices, persistent=False)

        def _merge_modules(modules_list: tuple):
            merged_modules = []
            for modules in modules_list:
                merged_modules.extend(modules if modules else [])
            return merged_modules

        # Standard model configurations for parallelism and device placement
        self._no_split_modules = _merge_modules(
            (self.llm._no_split_modules, self.visual_tokenizer.vit._no_split_modules))
        self._skip_keys_device_placement = self.llm._skip_keys_device_placement
        self._keep_in_fp32_modules = _merge_modules(
            (self.llm._keep_in_fp32_modules, self.visual_tokenizer.vit._keep_in_fp32_modules))
        self.is_parallelizable = all((self.llm.is_parallelizable, self.visual_tokenizer.vit.is_parallelizable))
        self.supports_gradient_checkpointing = True

    def tie_weights(self):
        self.llm.tie_weights()

    def get_wte(self):
        return self.llm.get_input_embeddings()

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        pixel_values: Optional[torch.Tensor],
        grid_thws: Optional[torch.Tensor],
        labels: Optional[torch.Tensor] = None,
        **kwargs
    ):
        inputs_embeds = self.merge_multimodal(
            input_ids=input_ids,
            pixel_values=pixel_values,
            grid_thws=grid_thws,
        )
        return self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, **kwargs)

    def merge_multimodal(
        self,
        input_ids: torch.Tensor,
        pixel_values: Optional[torch.Tensor],
        grid_thws: Optional[torch.Tensor],
    ):
        # Negative ids are multimodal placeholders; map them to 0 for the text
        # embedding lookup and overwrite their embeddings below.
        placeholder_token_mask = torch.lt(input_ids, 0)
        multimodal_embeds = self.get_wte()(torch.masked_fill(input_ids, placeholder_token_mask, 0))

        if pixel_values is not None:
            visual_indicator_embeds = self.vte(self.indicator_token_indices).to(
                dtype=multimodal_embeds.dtype, device=multimodal_embeds.device
            )
            visual_tokens = self.visual_tokenizer(pixel_values, grid_thws)
            visual_embeds = self.vte(visual_tokens).to(dtype=multimodal_embeds.dtype, device=multimodal_embeds.device)

            for i, indicator_id in enumerate(INDICATOR_IDS):
                multimodal_embeds[input_ids == indicator_id] = visual_indicator_embeds[i]
            multimodal_embeds[input_ids == VISUAL_ATOM_ID] = visual_embeds

        return multimodal_embeds

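    # Placeholder accounting (illustrative, assuming hidden_stride=2 and
    # temporal_patch_size=1): grid_thw = (1, 64, 64) expands one placeholder
    # into 1*64*64 // 2**2 // 1 = 1024 VISUAL_ATOM_ID slots, framed by a
    # begin/end indicator pair, as computed in _merge_inputs below.
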
    def _merge_inputs(
        self, raw_input_ids, placeholder_id, grid_thws, indicator_begin_id, indicator_end_id
    ):
        input_ids = []
        prev_index = 0
        placeholder_indexes = [i for i, v in enumerate(raw_input_ids) if v == placeholder_id]
        for placeholder_index, grid_thw in zip(placeholder_indexes, grid_thws):
            input_ids.extend(raw_input_ids[prev_index:placeholder_index])
            num_image_atoms = grid_thw.prod().item()
            num_image_atoms //= self.visual_tokenizer.vit.config.hidden_stride ** 2
            num_image_atoms //= self.visual_tokenizer.vit.config.temporal_patch_size
            input_ids.extend([indicator_begin_id] + [VISUAL_ATOM_ID] * num_image_atoms + [indicator_end_id])
            prev_index = placeholder_index + 1
        input_ids.extend(raw_input_ids[prev_index:])
        return input_ids

    def _tokenize_with_visual_placeholder(self, text):
        placeholder = VIDEO_PLACEHOLDER if VIDEO_PLACEHOLDER in text else IMAGE_PLACEHOLDER
        placeholder_id = VIDEO_PLACEHOLDER_ID if VIDEO_PLACEHOLDER in text else IMAGE_PLACEHOLDER_ID
        chunks = [self.text_tokenizer(chunk, add_special_tokens=False).input_ids for chunk in text.split(placeholder)]
        input_ids = chunks[0]
        for chunk in chunks[1:]:
            input_ids.append(placeholder_id)
            input_ids.extend(chunk)
        return input_ids

    def preprocess_inputs(
        self,
        messages: List[Union[str, Dict]],
        min_pixels=448 * 448,
        max_pixels=1792 * 1792,
        add_generation_prompt=True,
        enable_thinking=False
    ):
        text = self.text_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            enable_thinking=enable_thinking
        )
        input_ids = self._tokenize_with_visual_placeholder(text)
        images = []
        videos = []
        for message in messages:
            content = message["content"]
            if isinstance(content, list):
                images.extend([item["image"] for item in content if item.get("image") is not None])
                videos.extend([item["video"] for item in content if item.get("video") is not None])
        if images and videos:
            raise ValueError(
                "Multiple visual input data types detected (both image and video provided). "
                "This model supports only one type of visual input data at a time. "
                "Please provide either image or video, but not both."
            )

        pixel_values, grid_thws = None, None
        if images:
            pixel_values, grid_thws = zip(
                *(self.visual_tokenizer.preprocess(image=image, min_pixels=min_pixels, max_pixels=max_pixels)
                  for image in images)
            )
            input_ids = self._merge_inputs(
                input_ids, IMAGE_PLACEHOLDER_ID, grid_thws, INDICATOR_IDS[0], INDICATOR_IDS[1]
            )
            pixel_values = torch.cat(pixel_values, dim=0)
            grid_thws = torch.cat(grid_thws, dim=0)
        elif videos:
            assert len(videos) == 1, "only a single video is supported"
            pixel_values, grid_thws = self.visual_tokenizer.preprocess(
                video=videos[0], min_pixels=min_pixels, max_pixels=max_pixels
            )
            input_ids = self._merge_inputs(
                input_ids, VIDEO_PLACEHOLDER_ID, grid_thws, INDICATOR_IDS[2], INDICATOR_IDS[3]
            )

        input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0)

        return input_ids, pixel_values, grid_thws

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        attention_mask = torch.ne(inputs, self.text_tokenizer.pad_token_id).to(device=inputs.device)
        inputs_embeds = self.merge_multimodal(
            input_ids=inputs,
            pixel_values=kwargs.pop('pixel_values', None),
            grid_thws=kwargs.pop('grid_thws', None)
        )
        enable_thinking = kwargs.pop('enable_thinking', False)
        enable_thinking_budget = kwargs.pop('enable_thinking_budget', False)
        thinking_budget = kwargs.pop('thinking_budget', 1024)

        if enable_thinking and enable_thinking_budget:
            # Phase 1: decode at most `thinking_budget` tokens of reasoning.
            actual_max_new_tokens = kwargs['max_new_tokens']
            kwargs['max_new_tokens'] = thinking_budget
            generated_ids = self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            output_ids = generated_ids
            output_ids_list = generated_ids[0]

            # Check whether generation has already finished (151645 is <|im_end|>).
            if 151645 not in output_ids_list:
                # Check whether the thinking process has finished (151668 is </think>)
                # and prepare the second model input.
                if 151668 not in output_ids_list:
                    early_stopping_text = "\n\nConsidering the limited time by the user, I have to give the solution based on the thinking directly now.\n</think>\n\n"
                    early_stopping_ids = self.text_tokenizer(early_stopping_text, return_tensors="pt", return_attention_mask=False).input_ids.to(inputs.device)
                    input_ids_appendent = torch.cat([output_ids, early_stopping_ids], dim=-1)
                    if 'streamer' in kwargs:
                        kwargs['streamer'].put(early_stopping_ids)
                else:
                    input_ids_appendent = output_ids

                # Phase 2: continue from the concatenated embeddings with the
                # remaining token allowance.
                new_inputs = torch.cat([inputs, input_ids_appendent], dim=-1)
                attention_mask = torch.ne(new_inputs, self.text_tokenizer.pad_token_id).to(device=inputs.device)
                inputs_embeds_appendent = self.merge_multimodal(
                    input_ids=input_ids_appendent,
                    pixel_values=None,
                    grid_thws=None
                )
                new_inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_appendent], dim=-2)

                kwargs['max_new_tokens'] = inputs_embeds.size(-2) + actual_max_new_tokens - new_inputs_embeds.size(-2)
                generated_ids2 = self.llm.generate(inputs=None, inputs_embeds=new_inputs_embeds, attention_mask=attention_mask, **kwargs)
                if 'streamer' in kwargs:
                    kwargs['streamer'].manual_end()
                return torch.cat([input_ids_appendent, generated_ids2], dim=-1)

            else:
                if 'streamer' in kwargs:
                    kwargs['streamer'].manual_end()
                return generated_ids

        else:
            generated_ids = self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            if 'streamer' in kwargs:
                kwargs['streamer'].manual_end()
            return generated_ids


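# Thinking-budget flow in `generate` above, summarized: with enable_thinking
# and enable_thinking_budget set, phase 1 decodes at most `thinking_budget`
# tokens; if neither <|im_end|> (151645) nor </think> (151668) was produced,
# a fixed wrap-up string force-closes the thinking block, and phase 2 resumes
# from the concatenated embeddings with the remaining max_new_tokens allowance.
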
class Ovis2_6_Moe_PreTrainedModel(PreTrainedModel):
    config_class = Ovis2_6_Moe_Config
    base_model_prefix = "ovis2_6_moe"


class Ovis2_6_MoeForCausalLM(Ovis2_6_Moe_PreTrainedModel):
    _supports_flash_attn_2 = True

    def __init__(self, config: Ovis2_6_Moe_Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.llm = AutoModelForCausalLM.from_config(self.config.llm_config)
        assert self.config.hidden_size == self.llm.config.hidden_size, "hidden size mismatch"
        self.text_tokenizer = AutoTokenizer.from_pretrained(self.config.name_or_path)
        self.visual_tokenizer = VisualTokenizer(vit=AutoModel.from_config(self.config.vit_config),
                                                visual_vocab_size=self.config.visual_vocab_size,
                                                image_processor_name_or_path=self.config.name_or_path)

        self.vte = VisualEmbedding(self.config.visual_vocab_size, self.config.hidden_size,
                                   device=self.visual_tokenizer.vit.device, dtype=self.visual_tokenizer.vit.dtype)
        indicator_token_indices = torch.arange(
            self.config.visual_vocab_size - len(INDICATOR_IDS),
            self.config.visual_vocab_size,
            dtype=torch.long
        )
        self.register_buffer("indicator_token_indices", indicator_token_indices, persistent=False)

        def _merge_modules(modules_list: tuple):
            merged_modules = []
            for modules in modules_list:
                merged_modules.extend(modules if modules else [])
            return merged_modules

        # Standard model configurations for parallelism and device placement
        self._no_split_modules = _merge_modules(
            (self.llm._no_split_modules, self.visual_tokenizer.vit._no_split_modules))
        self._skip_keys_device_placement = self.llm._skip_keys_device_placement
        self._keep_in_fp32_modules = _merge_modules(
            (self.llm._keep_in_fp32_modules, self.visual_tokenizer.vit._keep_in_fp32_modules))
        self.is_parallelizable = all((self.llm.is_parallelizable, self.visual_tokenizer.vit.is_parallelizable))
        self.supports_gradient_checkpointing = True

    def tie_weights(self):
        self.llm.tie_weights()

    def get_wte(self):
        return self.llm.get_input_embeddings()

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        pixel_values: Optional[torch.Tensor],
        grid_thws: Optional[torch.Tensor],
        labels: Optional[torch.Tensor] = None,
        **kwargs
    ):
        inputs_embeds = self.merge_multimodal(
            input_ids=input_ids,
            pixel_values=pixel_values,
            grid_thws=grid_thws,
        )
        return self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, **kwargs)

    def merge_multimodal(
        self,
        input_ids: torch.Tensor,
        pixel_values: Optional[torch.Tensor],
        grid_thws: Optional[torch.Tensor],
    ):
        placeholder_token_mask = torch.lt(input_ids, 0)
        multimodal_embeds = self.get_wte()(torch.masked_fill(input_ids, placeholder_token_mask, 0))

        if pixel_values is not None:
            visual_indicator_embeds = self.vte(self.indicator_token_indices).to(
                dtype=multimodal_embeds.dtype, device=multimodal_embeds.device
            )
            visual_tokens = self.visual_tokenizer(pixel_values, grid_thws)
            visual_embeds = self.vte(visual_tokens).to(dtype=multimodal_embeds.dtype, device=multimodal_embeds.device)

            for i, indicator_id in enumerate(INDICATOR_IDS):
                multimodal_embeds[input_ids == indicator_id] = visual_indicator_embeds[i]
            multimodal_embeds[input_ids == VISUAL_ATOM_ID] = visual_embeds

        return multimodal_embeds

    def _merge_inputs(
        self, raw_input_ids, placeholder_id, grid_thws, indicator_begin_id, indicator_end_id
    ):
        input_ids = []
        prev_index = 0
        placeholder_indexes = [i for i, v in enumerate(raw_input_ids) if v == placeholder_id]
        for placeholder_index, grid_thw in zip(placeholder_indexes, grid_thws):
            input_ids.extend(raw_input_ids[prev_index:placeholder_index])
            num_image_atoms = grid_thw.prod().item()
            num_image_atoms //= self.visual_tokenizer.vit.config.hidden_stride ** 2
            num_image_atoms //= self.visual_tokenizer.vit.config.temporal_patch_size
            input_ids.extend([indicator_begin_id] + [VISUAL_ATOM_ID] * num_image_atoms + [indicator_end_id])
            prev_index = placeholder_index + 1
        input_ids.extend(raw_input_ids[prev_index:])
        return input_ids

    def _tokenize_with_visual_placeholder(self, text):
        placeholder = VIDEO_PLACEHOLDER if VIDEO_PLACEHOLDER in text else IMAGE_PLACEHOLDER
        placeholder_id = VIDEO_PLACEHOLDER_ID if VIDEO_PLACEHOLDER in text else IMAGE_PLACEHOLDER_ID
        chunks = [self.text_tokenizer(chunk, add_special_tokens=False).input_ids for chunk in text.split(placeholder)]
        input_ids = chunks[0]
        for chunk in chunks[1:]:
            input_ids.append(placeholder_id)
            input_ids.extend(chunk)
        return input_ids

    def preprocess_inputs(
        self,
        messages: List[Union[str, Dict]],
        min_pixels=448 * 448,
        max_pixels=1792 * 1792,
        add_generation_prompt=True,
        enable_thinking=False
    ):
        text = self.text_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            enable_thinking=enable_thinking
        )
        input_ids = self._tokenize_with_visual_placeholder(text)
        images = []
        videos = []
        for message in messages:
            content = message["content"]
            if isinstance(content, list):
                images.extend([item["image"] for item in content if item.get("image") is not None])
                videos.extend([item["video"] for item in content if item.get("video") is not None])
        if images and videos:
            raise ValueError(
                "Multiple visual input data types detected (both image and video provided). "
                "This model supports only one type of visual input data at a time. "
                "Please provide either image or video, but not both."
            )

        pixel_values, grid_thws = None, None
        if images:
            pixel_values, grid_thws = zip(
                *(self.visual_tokenizer.preprocess(image=image, min_pixels=min_pixels, max_pixels=max_pixels)
                  for image in images)
            )
            input_ids = self._merge_inputs(
                input_ids, IMAGE_PLACEHOLDER_ID, grid_thws, INDICATOR_IDS[0], INDICATOR_IDS[1]
            )
            pixel_values = torch.cat(pixel_values, dim=0)
            grid_thws = torch.cat(grid_thws, dim=0)
        elif videos:
            assert len(videos) == 1, "only a single video is supported"
            pixel_values, grid_thws = self.visual_tokenizer.preprocess(
                video=videos[0], min_pixels=min_pixels, max_pixels=max_pixels
            )
            input_ids = self._merge_inputs(
                input_ids, VIDEO_PLACEHOLDER_ID, grid_thws, INDICATOR_IDS[2], INDICATOR_IDS[3]
            )

        input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0)

        return input_ids, pixel_values, grid_thws

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        attention_mask = torch.ne(inputs, self.text_tokenizer.pad_token_id).to(device=inputs.device)
        inputs_embeds = self.merge_multimodal(
            input_ids=inputs,
            pixel_values=kwargs.pop('pixel_values', None),
            grid_thws=kwargs.pop('grid_thws', None)
        )
        enable_thinking = kwargs.pop('enable_thinking', False)
        enable_thinking_budget = kwargs.pop('enable_thinking_budget', False)
        thinking_budget = kwargs.pop('thinking_budget', 1024)

        if enable_thinking and enable_thinking_budget:
            actual_max_new_tokens = kwargs['max_new_tokens']
            kwargs['max_new_tokens'] = thinking_budget
            generated_ids = self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            output_ids = generated_ids
            output_ids_list = generated_ids[0]

            # check if the generation has already finished (151645 is <|im_end|>)
            if 151645 not in output_ids_list:
                # check if the thinking process has finished (151668 is </think>)
                # and prepare the second model input
                if 151668 not in output_ids_list:
                    early_stopping_text = "\n\nConsidering the limited time by the user, I have to give the solution based on the thinking directly now.\n</think>\n\n"
                    early_stopping_ids = self.text_tokenizer(early_stopping_text, return_tensors="pt", return_attention_mask=False).input_ids.to(inputs.device)
                    input_ids_appendent = torch.cat([output_ids, early_stopping_ids], dim=-1)
                    if 'streamer' in kwargs:
                        kwargs['streamer'].put(early_stopping_ids)
                else:
                    input_ids_appendent = output_ids

                # second generation
                new_inputs = torch.cat([inputs, input_ids_appendent], dim=-1)
                attention_mask = torch.ne(new_inputs, self.text_tokenizer.pad_token_id).to(device=inputs.device)
                inputs_embeds_appendent = self.merge_multimodal(
                    input_ids=input_ids_appendent,
                    pixel_values=None,
                    grid_thws=None
                )
                new_inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_appendent], dim=-2)

                kwargs['max_new_tokens'] = inputs_embeds.size(-2) + actual_max_new_tokens - new_inputs_embeds.size(-2)
                generated_ids2 = self.llm.generate(inputs=None, inputs_embeds=new_inputs_embeds, attention_mask=attention_mask, **kwargs)
                if 'streamer' in kwargs:
                    kwargs['streamer'].manual_end()
                return torch.cat([input_ids_appendent, generated_ids2], dim=-1)

            else:
                if 'streamer' in kwargs:
                    kwargs['streamer'].manual_end()
                return generated_ids

        else:
            generated_ids = self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            if 'streamer' in kwargs:
                kwargs['streamer'].manual_end()
            return generated_ids


class Ovis2_6_Next_PreTrainedModel(PreTrainedModel):
    config_class = Ovis2_6_Next_Config
    base_model_prefix = "ovis2_6_next"


class Ovis2_6_NextForCausalLM(Ovis2_6_Next_PreTrainedModel):
|
| 1230 |
+
_supports_flash_attn_2 = True
|
| 1231 |
+
|
| 1232 |
+
def __init__(self, config: Ovis2_6_Next_Config, *inputs, **kwargs):
|
| 1233 |
+
super().__init__(config, *inputs, **kwargs)
|
| 1234 |
+
|
| 1235 |
+
self.llm = AutoModelForCausalLM.from_config(self.config.llm_config)
|
| 1236 |
+
assert self.config.hidden_size == self.llm.config.hidden_size, "hidden size mismatch"
|
| 1237 |
+
self.text_tokenizer = AutoTokenizer.from_pretrained(self.config.name_or_path)
|
| 1238 |
+
self.visual_tokenizer = VisualTokenizer(vit=AutoModel.from_config(self.config.vit_config),
|
| 1239 |
+
visual_vocab_size=self.config.visual_vocab_size,
|
| 1240 |
+
image_processor_name_or_path=self.config.name_or_path)
|
| 1241 |
+
|
| 1242 |
+
self.vte = VisualEmbedding(self.config.visual_vocab_size, self.config.hidden_size,
|
| 1243 |
+
device=self.visual_tokenizer.vit.device, dtype=self.visual_tokenizer.vit.dtype)
|
| 1244 |
+
indicator_token_indices = torch.arange(
|
| 1245 |
+
self.config.visual_vocab_size - len(INDICATOR_IDS),
|
| 1246 |
+
self.config.visual_vocab_size,
|
| 1247 |
+
dtype=torch.long
|
| 1248 |
+
)
|
| 1249 |
+
self.register_buffer("indicator_token_indices", indicator_token_indices, persistent=False)
|
| 1250 |
+
|
| 1251 |
+
def _merge_modules(modules_list: tuple):
|
| 1252 |
+
merged_modules = []
|
| 1253 |
+
for modules in modules_list:
|
| 1254 |
+
merged_modules.extend(modules if modules else [])
|
| 1255 |
+
return merged_modules
|
| 1256 |
+
|
| 1257 |
+
# Standard model configurations for parallelism and device placement
|
| 1258 |
+
self._no_split_modules = _merge_modules(
|
| 1259 |
+
(self.llm._no_split_modules, self.visual_tokenizer.vit._no_split_modules))
|
| 1260 |
+
self._skip_keys_device_placement = self.llm._skip_keys_device_placement
|
| 1261 |
+
self._keep_in_fp32_modules = _merge_modules(
|
| 1262 |
+
(self.llm._keep_in_fp32_modules, self.visual_tokenizer.vit._keep_in_fp32_modules))
|
| 1263 |
+
self.is_parallelizable = all((self.llm.is_parallelizable, self.visual_tokenizer.vit.is_parallelizable))
|
| 1264 |
+
self.supports_gradient_checkpointing = True
|
| 1265 |
+
|
| 1266 |
+
def tie_weights(self):
|
| 1267 |
+
self.llm.tie_weights()
|
| 1268 |
+
|
| 1269 |
+
def get_wte(self):
|
| 1270 |
+
return self.llm.get_input_embeddings()
|
| 1271 |
+
|
| 1272 |
+
def forward(
|
| 1273 |
+
self,
|
| 1274 |
+
input_ids: torch.Tensor,
|
| 1275 |
+
attention_mask: torch.Tensor,
|
| 1276 |
+
pixel_values: Optional[torch.Tensor],
|
| 1277 |
+
grid_thws: Optional[torch.Tensor],
|
| 1278 |
+
labels: Optional[torch.Tensor] = None,
|
| 1279 |
+
**kwargs
|
| 1280 |
+
):
|
| 1281 |
+
inputs_embeds = self.merge_multimodal(
|
| 1282 |
+
input_ids=input_ids,
|
| 1283 |
+
pixel_values=pixel_values,
|
| 1284 |
+
grid_thws=grid_thws,
|
| 1285 |
+
)
|
| 1286 |
+
return self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, **kwargs)
|
| 1287 |
+
|
| 1288 |
+
def merge_multimodal(
|
| 1289 |
+
self,
|
| 1290 |
+
input_ids: torch.Tensor,
|
| 1291 |
+
pixel_values: Optional[torch.Tensor],
|
| 1292 |
+
grid_thws: Optional[torch.Tensor],
|
| 1293 |
+
):
|
| 1294 |
+
placeholder_token_mask = torch.lt(input_ids, 0)
|
| 1295 |
+
multimodal_embeds = self.get_wte()(torch.masked_fill(input_ids, placeholder_token_mask, 0))
|
| 1296 |
+
|
| 1297 |
+
if pixel_values is not None:
|
| 1298 |
+
visual_indicator_embeds = self.vte(self.indicator_token_indices).to(
|
| 1299 |
+
dtype=multimodal_embeds.dtype, device=multimodal_embeds.device
|
| 1300 |
+
)
|
| 1301 |
+
visual_tokens = self.visual_tokenizer(pixel_values, grid_thws)
|
| 1302 |
+
visual_embeds = self.vte(visual_tokens).to(dtype=multimodal_embeds.dtype, device=multimodal_embeds.device)
|
| 1303 |
+
|
| 1304 |
+
for i, indicator_id in enumerate(INDICATOR_IDS):
|
| 1305 |
+
multimodal_embeds[input_ids == indicator_id] = visual_indicator_embeds[i]
|
| 1306 |
+
multimodal_embeds[input_ids == VISUAL_ATOM_ID] = visual_embeds
|
| 1307 |
+
|
| 1308 |
+
return multimodal_embeds
|
| 1309 |
+
|
| 1310 |
+
def _merge_inputs(
|
| 1311 |
+
self, raw_input_ids, placeholder_id, grid_thws, indicator_begin_id, indicator_end_id
|
| 1312 |
+
):
|
| 1313 |
+
input_ids = []
|
| 1314 |
+
prev_index = 0
|
| 1315 |
+
placeholder_indexes = [i for i, v in enumerate(raw_input_ids) if v == placeholder_id]
|
| 1316 |
+
for placeholder_index, grid_thw in zip(placeholder_indexes, grid_thws):
|
| 1317 |
+
input_ids.extend(raw_input_ids[prev_index:placeholder_index])
|
| 1318 |
+
num_image_atoms = grid_thw.prod().item()
|
| 1319 |
+
num_image_atoms //= self.visual_tokenizer.vit.config.hidden_stride ** 2
|
| 1320 |
+
num_image_atoms //= self.visual_tokenizer.vit.config.temporal_patch_size
|
| 1321 |
+
input_ids.extend([indicator_begin_id] + [VISUAL_ATOM_ID] * num_image_atoms + [indicator_end_id])
|
| 1322 |
+
prev_index = placeholder_index + 1
|
| 1323 |
+
input_ids.extend(raw_input_ids[prev_index:])
|
| 1324 |
+
return input_ids
|
| 1325 |
+
|
| 1326 |
+
def _tokenize_with_visual_placeholder(self, text):
|
| 1327 |
+
placeholder = VIDEO_PLACEHOLDER if VIDEO_PLACEHOLDER in text else IMAGE_PLACEHOLDER
|
| 1328 |
+
placeholder_id = VIDEO_PLACEHOLDER_ID if VIDEO_PLACEHOLDER in text else IMAGE_PLACEHOLDER_ID
|
| 1329 |
+
chunks = [self.text_tokenizer(chunk, add_special_tokens=False).input_ids for chunk in text.split(placeholder)]
|
| 1330 |
+
input_ids = chunks[0]
|
| 1331 |
+
for chunk in chunks[1:]:
|
| 1332 |
+
input_ids.append(placeholder_id)
|
| 1333 |
+
input_ids.extend(chunk)
|
| 1334 |
+
return input_ids
|
| 1335 |
+
|
| 1336 |
+
    def preprocess_inputs(
        self,
        messages: List[Union[str, Dict]],
        min_pixels=448 * 448,
        max_pixels=1792 * 1792,
        add_generation_prompt=True,
        enable_thinking=False
    ):
        text = self.text_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            enable_thinking=enable_thinking
        )
        input_ids = self._tokenize_with_visual_placeholder(text)
        images = []
        videos = []
        for message in messages:
            content = message["content"]
            if isinstance(content, list):
                images.extend([item["image"] for item in content if item.get("image") is not None])
                videos.extend([item["video"] for item in content if item.get("video") is not None])
        if images and videos:
            raise ValueError(
                "Multiple visual input data types detected (both image and video provided). "
                "This model supports only one type of visual input data at a time. "
                "Please provide either image or video, but not both."
            )

        pixel_values, grid_thws = None, None
        if images:
            pixel_values, grid_thws = zip(
                *(self.visual_tokenizer.preprocess(image=image, min_pixels=min_pixels, max_pixels=max_pixels)
                  for image in images)
            )
            input_ids = self._merge_inputs(
                input_ids, IMAGE_PLACEHOLDER_ID, grid_thws, INDICATOR_IDS[0], INDICATOR_IDS[1]
            )
            pixel_values = torch.cat(pixel_values, dim=0)
            grid_thws = torch.cat(grid_thws, dim=0)
        elif videos:
            assert len(videos) == 1, "only a single video input is supported"
            pixel_values, grid_thws = self.visual_tokenizer.preprocess(
                video=videos[0], min_pixels=min_pixels, max_pixels=max_pixels
            )
            input_ids = self._merge_inputs(
                input_ids, VIDEO_PLACEHOLDER_ID, grid_thws, INDICATOR_IDS[2], INDICATOR_IDS[3]
            )

        input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0)

        return input_ids, pixel_values, grid_thws
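A hedged usage sketch of preprocess_inputs: `model` is an already-loaded Ovis2.6 instance and the image path is a placeholder; the content-item keys ("type", "image", "text") match what the method and the chat template read.

# Hypothetical call into preprocess_inputs; "model" and the image
# path are placeholders, not part of this repository.
from PIL import Image

messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": Image.open("example.jpg")},
        {"type": "text", "text": "Describe this image."},
    ],
}]
input_ids, pixel_values, grid_thws = model.preprocess_inputs(
    messages, add_generation_prompt=True, enable_thinking=False
)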
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        attention_mask = torch.ne(inputs, self.text_tokenizer.pad_token_id).to(device=inputs.device)
        inputs_embeds = self.merge_multimodal(
            input_ids=inputs,
            pixel_values=kwargs.pop('pixel_values', None),
            grid_thws=kwargs.pop('grid_thws', None)
        )
        enable_thinking = kwargs.pop('enable_thinking', False)
        enable_thinking_budget = kwargs.pop('enable_thinking_budget', False)
        thinking_budget = kwargs.pop('thinking_budget', 1024)

        if enable_thinking and enable_thinking_budget:
            actual_max_new_tokens = kwargs['max_new_tokens']
            kwargs['max_new_tokens'] = thinking_budget
            generated_ids = self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            output_ids = generated_ids
            output_ids_list = generated_ids[0]

            # check whether generation has already finished (151645 is <|im_end|>)
            if 151645 not in output_ids_list:
                # check whether the thinking process has finished (151668 is </think>)
                # and prepare the second model input
                if 151668 not in output_ids_list:
                    early_stopping_text = "\n\nConsidering the limited time by the user, I have to give the solution based on the thinking directly now.\n</think>\n\n"
                    early_stopping_ids = self.text_tokenizer(early_stopping_text, return_tensors="pt", return_attention_mask=False).input_ids.to(inputs.device)
                    input_ids_appendent = torch.cat([output_ids, early_stopping_ids], dim=-1)
                    if 'streamer' in kwargs:
                        kwargs['streamer'].put(early_stopping_ids)
                else:
                    input_ids_appendent = output_ids

                # second generation: continue from the (possibly truncated) thinking
                new_inputs = torch.cat([inputs, input_ids_appendent], dim=-1)
                attention_mask = torch.ne(new_inputs, self.text_tokenizer.pad_token_id).to(device=inputs.device)
                inputs_embeds_appendent = self.merge_multimodal(
                    input_ids=input_ids_appendent,
                    pixel_values=None,
                    grid_thws=None
                )
                new_inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_appendent], dim=-2)

                kwargs['max_new_tokens'] = inputs_embeds.size(-2) + actual_max_new_tokens - new_inputs_embeds.size(-2)
                generated_ids2 = self.llm.generate(inputs=None, inputs_embeds=new_inputs_embeds, attention_mask=attention_mask, **kwargs)
                if 'streamer' in kwargs:
                    kwargs['streamer'].manual_end()
                return torch.cat([input_ids_appendent, generated_ids2], dim=-1)

            else:
                if 'streamer' in kwargs:
                    kwargs['streamer'].manual_end()
                return generated_ids

        else:
            generated_ids = self.llm.generate(inputs=None, inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            if 'streamer' in kwargs:
                kwargs['streamer'].manual_end()
            return generated_ids
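A usage sketch of the budgeted two-phase decode above: if the first pass exhausts `thinking_budget` tokens without emitting `</think>`, generate() appends an early-stopping phrase and decodes the final answer in a second pass. `model`, `input_ids`, `pixel_values`, and `grid_thws` are assumed to come from preprocess_inputs; parameter names match the kwargs popped above.

# Sketch: thinking-budget generation (all inputs are assumed to exist).
output_ids = model.generate(
    inputs=input_ids.to(model.device),
    pixel_values=pixel_values.to(model.device) if pixel_values is not None else None,
    grid_thws=grid_thws.to(model.device) if grid_thws is not None else None,
    enable_thinking=True,
    enable_thinking_budget=True,
    thinking_budget=1024,
    max_new_tokens=2048,   # must exceed thinking_budget
)
text = model.text_tokenizer.decode(output_ids[0], skip_special_tokens=True)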
AutoConfig.register('siglip2_navit', Siglip2NavitConfig)
AutoModel.register(Siglip2NavitConfig, Siglip2NavitModel)

AutoConfig.register("ovis2_6", Ovis2_6_Config)
AutoModelForCausalLM.register(Ovis2_6_Config, Ovis2_6ForCausalLM)

AutoConfig.register("ovis2_6_moe", Ovis2_6_Moe_Config)
AutoModelForCausalLM.register(Ovis2_6_Moe_Config, Ovis2_6_MoeForCausalLM)

AutoConfig.register("ovis2_6_next", Ovis2_6_Next_Config)
AutoModelForCausalLM.register(Ovis2_6_Next_Config, Ovis2_6_NextForCausalLM)
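With these registrations in place, a remote-code load should be enough to resolve the custom config and model classes. A loading sketch (the repo id is an assumption based on this model card; pin a revision as you see fit):

# Sketch: loading through the registered Auto classes.
# trust_remote_code pulls configuration_ovis2_6.py / modeling_ovis2_6.py.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "AIDC-AI/Ovis2.6-30B-A3B",   # assumed repo id for this upload
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
).eval()
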
preprocessor_config.json
ADDED
@@ -0,0 +1,24 @@
{
  "do_convert_rgb": null,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [0.5, 0.5, 0.5],
  "image_processor_type": "SiglipImageProcessor",
  "image_std": [0.5, 0.5, 0.5],
  "processor_class": "SiglipProcessor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 512,
    "width": 512
  }
}
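The rescale/normalize pair in this config maps uint8 pixels to roughly [-1, 1]: x/255 first, then (x - mean)/std per channel with mean = std = 0.5. A one-line arithmetic check:

# Check: 0.00392156862745098 == 1/255, and mean=std=0.5 maps [0, 1] -> [-1, 1].
x = 255 * 0.00392156862745098   # rescale: 1.0
print((x - 0.5) / 0.5)          # normalize: 1.0 (pixel 0 maps to -1.0)
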
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
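These special tokens line up with the ids hard-coded in generate() above (151645 for <|im_end|>, 151668 for </think>). A quick sanity check, assuming `tokenizer` is this repo's loaded text tokenizer:

# Sanity check that special-token ids match the constants in generate().
print(tokenizer.convert_tokens_to_ids("<|im_end|>"))   # expected 151645
print(tokenizer.convert_tokens_to_ids("</think>"))     # expected 151668
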
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
size 11422654
tokenizer_config.json
ADDED
@@ -0,0 +1,247 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151665": {"content": "<tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151666": {"content": "</tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151667": {"content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151668": {"content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
  },
  "additional_special_tokens": [
    "<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>",
    "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>",
    "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>",
    "<image>", "<video>", "<ovis_visual_atom>",
    "<ovis_image_start>", "<ovis_image_end>", "<ovis_video_start>", "<ovis_video_end>"
  ],
  "bos_token": null,
  "chat_template": "{%- for message in messages %}{{- '<|im_start|>' + message.role + '\n'}}{%- if message.role == 'system' or message.role == 'user' %}{%- if message.content is string %}{{- message.content }}{%- else %}{%- for item in message.content %}{%- if item.type == 'text' and 'text' in item %}{{- item.text }}{%- elif item.type == 'image' %}{{- '<image>'}}{%- elif item.type == 'video' %}{{- '<video>'}}{%- else %}{{- raise_exception('Invalid content type. Supported types for system and user are text, image, video.')}}{%- endif %}{%- if not loop.last %}{{- '\n'}}{%- endif %}{%- endfor %}{%- endif %}{%- elif message.role == 'assistant' %}{%- set content = '' %}{%- if message.content is string %}{%- set content = message.content %}{%- else %}{%- set ns = namespace(content='') -%}{%- for item in message.content %}{%- if item.type == 'text' and 'text' in item %}{%- set ns.content = ns.content ~ item.text %}{%- else %}{{- raise_exception('Invalid content type. Supported type for assistant is text.')}}{%- endif %}{%- endfor %}{%- set content = ns.content -%}{%- endif %}{%- set content = content.split('</think>')[-1].lstrip('\n') %}{{- content }}{%- else %}{{- raise_exception('Invalid role. Supported roles are system, user, assistant.')}}{%- endif %}{{- '<|im_end|>\n'}}{%- endfor %}{%- if add_generation_prompt %}{{- '<|im_start|>assistant\n' }}{%- if enable_thinking is defined and enable_thinking is false %}{{- '<think>\n\n</think>\n\n' }}{%- endif %}{%- endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
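One notable behavior of this chat template: with add_generation_prompt=True and enable_thinking=False it pre-fills an empty <think></think> block, and assistant-turn history is stripped of its thinking via content.split('</think>')[-1]. A rendering sketch, assuming `tokenizer` is this repo's loaded tokenizer:

# Sketch: rendering the template with thinking disabled.
messages = [{"role": "user", "content": "Hello"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
)
print(prompt)
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant
# <think>
#
# </think>
#
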
vocab.json
ADDED
The diff for this file is too large to render.