SmolVLM2-256M-Video-Instruct-openvino-8bit-vision-encoder-static / openvino_text_embeddings_model.xml
| <net name="Model3" version="11"> | |
| <layers> | |
| <layer id="0" name="input" type="Parameter" version="opset1"> | |
| <data shape="?,?" element_type="i64" /> | |
| <output> | |
| <port id="0" precision="I64" names="input"> | |
| <dim>-1</dim> | |
| <dim>-1</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="1" name="self.weight" type="Const" version="opset1"> | |
| <data element_type="u8" shape="49280, 576" offset="0" size="28385280" /> | |
| <output> | |
| <port id="0" precision="U8"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="2" name="Convert_891888" type="Convert" version="opset1"> | |
| <data destination_type="f16" /> | |
| <input> | |
| <port id="0" precision="U8"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </input> | |
| <output> | |
| <port id="1" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="3" name="self.weight/zero_point" type="Const" version="opset1"> | |
| <data element_type="u8" shape="49280, 1" offset="28385280" size="49280" /> | |
| <output> | |
| <port id="0" precision="U8"> | |
| <dim>49280</dim> | |
| <dim>1</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="4" name="Convert_891891" type="Convert" version="opset1"> | |
| <data destination_type="f16" /> | |
| <input> | |
| <port id="0" precision="U8"> | |
| <dim>49280</dim> | |
| <dim>1</dim> | |
| </port> | |
| </input> | |
| <output> | |
| <port id="1" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>1</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="5" name="self.weight/zero_point/subtract" type="Subtract" version="opset1"> | |
| <data auto_broadcast="numpy" /> | |
| <input> | |
| <port id="0" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| <port id="1" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>1</dim> | |
| </port> | |
| </input> | |
| <output> | |
| <port id="2" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="6" name="self.weight/scale" type="Const" version="opset1"> | |
| <data element_type="f16" shape="49280, 1" offset="28434560" size="98560" /> | |
| <output> | |
| <port id="0" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>1</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="7" name="self.weight/fq_weights_0" type="Multiply" version="opset1"> | |
| <data auto_broadcast="numpy" /> | |
| <input> | |
| <port id="0" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| <port id="1" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>1</dim> | |
| </port> | |
| </input> | |
| <output> | |
| <port id="2" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="8" name="self.weight/fq_weights_0/convert" type="Convert" version="opset1"> | |
| <data destination_type="f32" /> | |
| <input> | |
| <port id="0" precision="FP16"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </input> | |
| <output> | |
| <port id="1" precision="FP32"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="9" name="aten::embedding/Convert" type="Convert" version="opset1"> | |
| <data destination_type="i32" /> | |
| <input> | |
| <port id="0" precision="I64"> | |
| <dim>-1</dim> | |
| <dim>-1</dim> | |
| </port> | |
| </input> | |
| <output> | |
| <port id="1" precision="I32"> | |
| <dim>-1</dim> | |
| <dim>-1</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="10" name="aten::embedding/Constant" type="Const" version="opset1"> | |
| <data element_type="i32" shape="" offset="28533120" size="4" /> | |
| <output> | |
| <port id="0" precision="I32" /> | |
| </output> | |
| </layer> | |
| <layer id="11" name="aten::embedding/Gather" type="Gather" version="opset8"> | |
| <data batch_dims="0" /> | |
| <input> | |
| <port id="0" precision="FP32"> | |
| <dim>49280</dim> | |
| <dim>576</dim> | |
| </port> | |
| <port id="1" precision="I32"> | |
| <dim>-1</dim> | |
| <dim>-1</dim> | |
| </port> | |
| <port id="2" precision="I32" /> | |
| </input> | |
| <output> | |
| <port id="3" precision="FP32" names="inputs_embeds"> | |
| <dim>-1</dim> | |
| <dim>-1</dim> | |
| <dim>576</dim> | |
| </port> | |
| </output> | |
| </layer> | |
| <layer id="12" name="Result_10181" type="Result" version="opset1" output_names="inputs_embeds"> | |
| <input> | |
| <port id="0" precision="FP32"> | |
| <dim>-1</dim> | |
| <dim>-1</dim> | |
| <dim>576</dim> | |
| </port> | |
| </input> | |
| </layer> | |
| </layers> | |
| <edges> | |
| <edge from-layer="0" from-port="0" to-layer="9" to-port="0" /> | |
| <edge from-layer="1" from-port="0" to-layer="2" to-port="0" /> | |
| <edge from-layer="2" from-port="1" to-layer="5" to-port="0" /> | |
| <edge from-layer="3" from-port="0" to-layer="4" to-port="0" /> | |
| <edge from-layer="4" from-port="1" to-layer="5" to-port="1" /> | |
| <edge from-layer="5" from-port="2" to-layer="7" to-port="0" /> | |
| <edge from-layer="6" from-port="0" to-layer="7" to-port="1" /> | |
| <edge from-layer="7" from-port="2" to-layer="8" to-port="0" /> | |
| <edge from-layer="8" from-port="1" to-layer="11" to-port="0" /> | |
| <edge from-layer="9" from-port="1" to-layer="11" to-port="1" /> | |
| <edge from-layer="10" from-port="0" to-layer="11" to-port="2" /> | |
| <edge from-layer="11" from-port="3" to-layer="12" to-port="0" /> | |
| </edges> | |
| <rt_info> | |
| <Runtime_version value="2025.3.0-19807-44526285f24-releases/2025/3" /> | |
| <conversion_parameters> | |
| <framework value="pytorch" /> | |
| <is_python_object value="True" /> | |
| </conversion_parameters> | |
| <nncf> | |
| <friendly_names_were_updated value="True" /> | |
| <version value="2.18.0" /> | |
| <weight_compression> | |
| <advanced_parameters value="{'statistics_path': None, 'lora_adapter_rank': 256, 'group_size_fallback_mode': 'ignore', 'min_adjusted_group_size': 16, 'awq_params': {'subset_size': 32, 'percent_to_apply': 0.002, 'alpha_min': 0.0, 'alpha_max': 1.0, 'steps': 100, 'prefer_data_aware_scaling': True}, 'scale_estimation_params': {'subset_size': 64, 'initial_steps': 5, 'scale_steps': 5, 'weight_penalty': -1.0}, 'gptq_params': {'damp_percent': 0.1, 'block_size': 128, 'subset_size': 128}, 'lora_correction_params': {'adapter_rank': 8, 'num_iterations': 3, 'apply_regularization': True, 'subset_size': 128, 'use_int8_adapters': True}, 'backend_params': {}, 'codebook': None}" /> | |
| <all_layers value="False" /> | |
| <awq value="False" /> | |
| <backup_mode value="int8_asym" /> | |
| <compression_format value="dequantize" /> | |
| <gptq value="False" /> | |
| <group_size value="-1" /> | |
| <ignored_scope value="[]" /> | |
| <lora_correction value="False" /> | |
| <mode value="int8_asym" /> | |
| <ratio value="1.0" /> | |
| <scale_estimation value="False" /> | |
| <sensitivity_metric value="weight_quantization_error" /> | |
| </weight_compression> | |
| </nncf> | |
| <optimum> | |
| <nncf_version value="2.18.0" /> | |
| <optimum_intel_version value="1.26.0.dev0+f9cff03" /> | |
| <optimum_version value="2.0.0.dev0" /> | |
| <pytorch_version value="2.8.0" /> | |
| <transformers_version value="4.55.4" /> | |
| </optimum> | |
| </rt_info> | |
| </net> | |