Transformer-only version of Lightricks/LTX-2.3-fp8.

Kijai has now published an fp8_input_scaled version, so this repo will be removed soon.

The rest of the parts can be found in Kijai/LTX2.3_comfy.

How to reproduce

import sys
import json
import torch
from safetensors import safe_open
from safetensors.torch import save_file

def cut_safetensors(input_path, output_path):
    """Extract only the transformer (diffusion model) from a combined LTX checkpoint.

    Reads the safetensors file at *input_path*, keeps only tensors whose keys
    start with ``model.diffusion_model.``, strips the vae/audio_vae/vocoder
    sections from the ``config`` metadata, converts per-layer quantization
    metadata into ``<layer>.comfy_quant`` uint8 tensors, and writes the result
    to *output_path*.
    """
    with safe_open(input_path, framework="pt", device="cpu") as f:
        # metadata() returns None when the file carries no metadata header;
        # fall back to an empty dict so the .get() calls below are safe.
        metadata = f.metadata() or {}

        # Drop sub-model configs that do not belong to the transformer.
        config = json.loads(metadata.get('config', '{}'))
        for key in ['vae', 'audio_vae', 'vocoder']:
            config.pop(key, None)
        metadata['config'] = json.dumps(config)

        quant_meta = json.loads(metadata.get('_quantization_metadata', '{"layers": {}}'))
        quant_layers = quant_meta.get("layers", {})
        # pop() instead of del: the key may legitimately be absent (the .get()
        # above already tolerates that case).
        metadata.pop('_quantization_metadata', None)

        new_state_dict = {}
        prefix = "model.diffusion_model."

        for key in f.keys():
            if key.startswith(prefix):
                new_state_dict[key] = f.get_tensor(key)
                # Quant metadata is keyed by the layer name without the
                # trailing ".weight"; removesuffix only strips the suffix,
                # whereas replace() would also clobber an interior ".weight".
                base_key = key.removesuffix(".weight")
                if base_key in quant_layers:
                    quant_info = quant_layers[base_key]
                    # Serialize the per-layer quant info as a uint8 tensor so
                    # it survives the safetensors round trip.
                    json_data = json.dumps(quant_info).encode("utf-8")
                    new_tensor = torch.tensor(list(json_data), dtype=torch.uint8)
                    new_state_dict[f"{base_key}.comfy_quant"] = new_tensor

        save_file(new_state_dict, output_path, metadata=metadata)

if __name__ == "__main__":
    # Parse CLI arguments only when run as a script: the original unpacked
    # sys.argv at module level, which crashed on import when fewer than two
    # arguments were present.
    input_path, output_path = sys.argv[1:3]
    cut_safetensors(input_path, output_path)
Downloads last month
88
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Model tree for Bedovyy/ltx2.3_transformer_only_fp8

Merge model
this model