nm-testing model repositories (name • parameter count • downloads • likes; fields that did not survive extraction are omitted):

nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-channel_weights_tensor_act_fp8-BitMaskCompressed • 5B • 1
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-tensor_weights_per_token_dynamic_act_fp8-BitMaskCompressed • 5B • 1
nm-testing/llama2.c-stories15M-pruned_50.2of4-uncompressed-tensor_weights_tensor_act_fp8-BitMaskCompressed • 22.2M
nm-testing/llama2.c-stories15M-pruned_50.2of4-uncompressed • 24.4M
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-W8A8-testing • 8B • 211
nm-testing/TinyLlama-1.1B-Chat-v1.0-INT8-Static-testing • 1B • 13
nm-testing/TinyLlama-1.1B-Chat-v1.0-INT8-Dynamic-IA-Per-Tensor-Weight-testing • 1B • 10
nm-testing/TinyLlama-1.1B-Chat-v1.0-INT8-Dynamic-IA-Per-Channel-Weight-testing • 1B • 12
nm-testing/TinyLlama-1.1B-Chat-v1.0-2of4-Sparse-Dense-Compressor • 1B • 23
nm-testing/llama-3-fp8-2of4-dynamic-uncompressed • 8B • 2
nm-testing/Llama-3.3-70B-Instruct-FP8-dynamic • 71B • 214 • 13
nm-testing/Meta-Llama-3-8B-Instruct-FP8-Dynamic-IA-Per-Tensor-Weight-testing • 8B • 8
nm-testing/Meta-Llama-3-8B-Instruct-FP8-Static-Per-Tensor-testing • 8B • 8
nm-testing/Meta-Llama-3-8B-Instruct-FP8-Static-testing • 8B • 10
nm-testing/Qwen2-VL-2B-Instruct-FP8-dynamic • 2B • 1
nm-testing/Meta-Llama-3-8B-Instruct-FP8-Dynamic-2of4-testing • 8B • 12
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-Dynamic-testing • 8B • 219
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-Dynamic-Channel-BitMaskCompressed
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-Dynamic-Channel • 5B • 1
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-Dynamic • 5B • 1
nm-testing/TinyLlama-1.1B-Chat-v1.0-pruned_50.2of4-FP8-compressed • 0.6B • 1
nm-testing/TinyLlama-1.1B-Chat-v1.0-pruned_50.2of4-FP8-uncompressed
nm-testing/Llama-3.1-8B-gsm8k-quantized.w4a16-noactorder • 2B • 1
nm-testing/Sparse-Llama-3.1-8B-evolcodealpaca-pruned.2of4-quantized.w4a16-test • 2B • 1
nm-testing/pixtral-12b-W4A16 • 3B
nm-testing/pixtral-12b-W8A8 • 13B • 2
nm-testing/Llama-3.2-90B-Vision-Instruct-quantized.w4a16 • 14B • 1
nm-testing/Llama-3.2-11B-Vision-Instruct-quantized.w8a8 • 11B
nm-testing/Llama-3.1-8B-gsm8k-quantized.w4a16 • 2B
nm-testing/debug-gsm8k-llmcompressor_packed • 2B
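The entries above appear to be compressed-tensors test checkpoints (FP8/INT8/W4A16 quantization, 2:4 sparsity, bitmask compression). As a minimal sketch only, assuming a recent vLLM build with compressed-tensors support and a GPU large enough for an 8B model, one of the FP8-dynamic repositories from the list could be loaded like this; the repository name is taken from the list, while the prompt and sampling settings are purely illustrative:

# Minimal sketch (assumption: vLLM with compressed-tensors support is installed;
# only the repository id comes from the listing, everything else is illustrative).
from vllm import LLM, SamplingParams

llm = LLM(model="nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-Dynamic")
sampling = SamplingParams(temperature=0.0, max_tokens=64)

# GSM8K-style arithmetic prompt, matching the dataset named in the repo id.
outputs = llm.generate(
    ["A train travels 60 miles per hour for 3 hours. How far does it travel?"],
    sampling,
)
print(outputs[0].outputs[0].text)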