from transformers import (
AutoTokenizer,
Gemma4ForConditionalGeneration,
)
def generate_vlm_model(output_dir="./tiny-random-gemma4-31B"):
    """Build and save a tiny, randomly initialized Gemma-4 VLM for testing.

    Only the config, tokenizer, and processor are fetched from the Hub
    (network access required); the model weights themselves are freshly
    initialized from the shrunken config, so the saved artifact is small
    and quick to produce.

    Args:
        output_dir: Directory (str or os.PathLike) where the model,
            tokenizer, and processor files are written.

    Returns:
        The randomly initialized ``Gemma4ForConditionalGeneration`` model.
    """
    # AutoTokenizer and Gemma4ForConditionalGeneration are already imported
    # at module level; only AutoConfig and AutoProcessor are needed here.
    from transformers import AutoConfig, AutoProcessor

    model_id = "google/gemma-4-31B-it"
    config = AutoConfig.from_pretrained(model_id)

    # Shrink the text tower to toy dimensions so the model stays tiny.
    config.text_config.global_head_dim = 4
    config.text_config.head_dim = 4
    config.text_config.hidden_size = 32
    config.text_config.hidden_size_per_layer_input = 0
    config.text_config.num_hidden_layers = 2
    # One layer of each attention type, matching num_hidden_layers above.
    config.text_config.layer_types = ["sliding_attention", "full_attention"]
    config.text_config.num_kv_shared_layers = 0
    config.text_config.intermediate_size = 64
    config.text_config.dtype = "float32"

    # Shrink the vision tower likewise.
    config.vision_config.head_dim = 4
    config.vision_config.hidden_size = 8
    config.vision_config.intermediate_size = 32
    config.vision_config.num_hidden_layers = 1
    config.vision_config.num_key_value_heads = 2

    model = Gemma4ForConditionalGeneration(config)
    model.eval()
    model.save_pretrained(str(output_dir))

    # Save the full preprocessing stack alongside the weights so the
    # output directory is directly loadable with from_pretrained.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.save_pretrained(str(output_dir))
    processor = AutoProcessor.from_pretrained(model_id)
    processor.save_pretrained(str(output_dir))

    return model
if __name__ == "__main__":
    # Script entry point: generate the tiny model with the default output dir.
    generate_vlm_model()
# --- Model-card page residue (not code; commented out so the file parses) ---
# Downloads last month: -
# Inference Providers (NEW): this model isn't deployed by any Inference
# Provider. 🙋 Ask for provider support.