补充上次提交

This commit is contained in:
qhy
2026-02-11 16:24:40 +08:00
parent 352a79035f
commit f386a5810b
4 changed files with 26 additions and 15 deletions

View File

@@ -572,9 +572,9 @@ def run_inference(args: argparse.Namespace, gpu_num: int, gpu_no: int) -> None:
print(f">>> Prepared model saved ({os.path.getsize(prepared_path) / 1024**3:.1f} GB).")
# ---- BF16: only convert the diffusion backbone, keep VAE/CLIP/embedder in FP32 ----
-    model.model.to(torch.bfloat16)
-    model.model.diffusion_model.dtype = torch.bfloat16
-    print(">>> Diffusion backbone (model.model) converted to BF16.")
+    model.model.to(torch.float16)
+    model.model.diffusion_model.dtype = torch.float16
+    print(">>> Diffusion backbone (model.model) converted to FP16.")
# Build normalizer (always needed, independent of model loading path)
logging.info("***** Configing Data *****")