当前位置:   article > 正文

stable-diffusion-webui 的模型更新 — FileNotFoundError: No checkpoints found. When searching for checkpoints, looked at: …

报错信息:FileNotFoundError: No checkpoints found. When searching for checkpoints, looked at: …(以下分析该错误涉及的代码路径)

shared.py和sd_models.py中

shared.py:

  1. options_templates.update(options_section(('sd', "Stable Diffusion"), {
  2. "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
  3. "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
  4. "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
  5. "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
  6. "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
  7. "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
  8. "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
  9. "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
  10. "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
  11. "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
  12. "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}),
  13. "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
  14. "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
  15. "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
  16. "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
  17. "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
  18. "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
  19. "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
  20. }))

模型的列表在list_checkpoint_tiles()中,更新在refresh_checkpoints中,282行

  1. def list_checkpoint_tiles():
  2. import modules.sd_models
  3. return modules.sd_models.checkpoint_tiles()
  4. def refresh_checkpoints():
  5. import modules.sd_models
  6. return modules.sd_models.list_models()

点击下拉框选中并切换模型时,执行 sd_models.py 第 519 行的 reload_model_weights:

  1. def reload_model_weights(sd_model=None, info=None):
  2. from modules import lowvram, devices, sd_hijack
  3. checkpoint_info = info or select_checkpoint()
  4. if not sd_model:
  5. sd_model = model_data.sd_model
  6. if sd_model is None: # previous model load failed
  7. current_checkpoint_info = None
  8. else:
  9. current_checkpoint_info = sd_model.sd_checkpoint_info
  10. if sd_model.sd_model_checkpoint == checkpoint_info.filename:
  11. return
  12. sd_unet.apply_unet("None")
  13. if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
  14. lowvram.send_everything_to_cpu()
  15. else:
  16. sd_model.to(devices.cpu)
  17. sd_hijack.model_hijack.undo_hijack(sd_model)
  18. timer = Timer()
  19. state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
  20. checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
  21. timer.record("find config")
  22. if sd_model is None or checkpoint_config != sd_model.used_config:
  23. del sd_model
  24. load_model(checkpoint_info, already_loaded_state_dict=state_dict)
  25. return model_data.sd_model
  26. try:
  27. load_model_weights(sd_model, checkpoint_info, state_dict, timer)
  28. except Exception:
  29. print("Failed to load checkpoint, restoring previous")
  30. load_model_weights(sd_model, current_checkpoint_info, None, timer)
  31. raise
  32. finally:
  33. sd_hijack.model_hijack.hijack(sd_model)
  34. timer.record("hijack")
  35. script_callbacks.model_loaded_callback(sd_model)
  36. timer.record("script callbacks")
  37. if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
  38. sd_model.to(devices.device)
  39. timer.record("move model to device")
  40. print(f"Weights loaded in {timer.summary()}.")
  41. return sd_model

其中 sd_models.py 第 165 行的 select_checkpoint(本文报错即来自此函数):

  1. def select_checkpoint():
  2. """Raises `FileNotFoundError` if no checkpoints are found."""
  3. model_checkpoint = shared.opts.sd_model_checkpoint
  4. checkpoint_info = checkpoint_alisases.get(model_checkpoint, None)
  5. if checkpoint_info is not None:
  6. return checkpoint_info
  7. if len(checkpoints_list) == 0:
  8. error_message = "No checkpoints found. When searching for checkpoints, looked at:"
  9. if shared.cmd_opts.ckpt is not None:
  10. error_message += f"\n - file {os.path.abspath(shared.cmd_opts.ckpt)}"
  11. error_message += f"\n - directory {model_path}"
  12. if shared.cmd_opts.ckpt_dir is not None:
  13. error_message += f"\n - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}"
  14. error_message += "Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations."
  15. raise FileNotFoundError(error_message)
  16. checkpoint_info = next(iter(checkpoints_list.values()))
  17. if model_checkpoint is not None:
  18. print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
  19. return checkpoint_info

实际如果在代码中想要更改权重,注意 reload_model_weights(sd_model=None, info=None) 并没有 model_checkpoint 这个参数,直接传会报 TypeError。正确做法是先设置选项、再调用(select_checkpoint 会读取 shared.opts.sd_model_checkpoint):

 shared.opts.sd_model_checkpoint = input_json.get('model', "chilloutmix_NiPrunedFp32Fix.safetensors")
 reload_model_weights()

即可。

初始化在webui.py的270行

modules.sd_models.list_models()

中。models/Stable-diffusion 目录下的模型在整个工程启动前就被扫描登记,见 sd_models.py 第 113 行:

  1. def list_models():
  2. checkpoints_list.clear()
  3. checkpoint_alisases.clear()
  4. cmd_ckpt = shared.cmd_opts.ckpt
  5. if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
  6. model_url = None
  7. else:
  8. model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
  9. model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
  10. if os.path.exists(cmd_ckpt):
  11. checkpoint_info = CheckpointInfo(cmd_ckpt)
  12. checkpoint_info.register()
  13. shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
  14. elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
  15. print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
  16. for filename in sorted(model_list, key=str.lower):
  17. checkpoint_info = CheckpointInfo(filename)
  18. checkpoint_info.register()

每个模型文件都包装成 CheckpointInfo,并通过 register() 登记到全局的 checkpoints_list 与 checkpoint_alisases 中。

webui.py的311行,加载模型:

  1. def load_model():
  2. """
  3. Accesses shared.sd_model property to load model.
  4. After it's available, if it has been loaded before this access by some extension,
  5. its optimization may be None because the list of optimizaers has neet been filled
  6. by that time, so we apply optimization again.
  7. """
  8. shared.sd_model # noqa: B018
  9. if modules.sd_hijack.current_optimizer is None:
  10. modules.sd_hijack.apply_optimizations()
  11. Thread(target=load_model).start()

在shared.py的714行中

  1. class Shared(sys.modules[__name__].__class__):
  2. """
  3. this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
  4. at program startup.
  5. """
  6. sd_model_val = None
  7. @property
  8. def sd_model(self):
  9. import modules.sd_models
  10. return modules.sd_models.model_data.get_sd_model()
  11. @sd_model.setter
  12. def sd_model(self, value):
  13. import modules.sd_models
  14. modules.sd_models.model_data.set_sd_model(value)
  15. sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead
  16. sys.modules[__name__].__class__ = Shared

sd_model 的 getter/setter 实际落在 sd_models.py 第 406 行的 SdModelData 上:

  1. class SdModelData:
  2. def __init__(self):
  3. self.sd_model = None
  4. self.was_loaded_at_least_once = False
  5. self.lock = threading.Lock()
  6. def get_sd_model(self):
  7. if self.was_loaded_at_least_once:
  8. return self.sd_model
  9. if self.sd_model is None:
  10. with self.lock:
  11. if self.sd_model is not None or self.was_loaded_at_least_once:
  12. return self.sd_model
  13. try:
  14. load_model()
  15. except Exception as e:
  16. errors.display(e, "loading stable diffusion model", full_traceback=True)
  17. print("", file=sys.stderr)
  18. print("Stable diffusion model failed to load", file=sys.stderr)
  19. self.sd_model = None
  20. return self.sd_model
  21. def set_sd_model(self, v):
  22. self.sd_model = v

在 sd_models.py 第 438 行,真正构建并加载模型:

  1. def load_model(checkpoint_info=None, already_loaded_state_dict=None):
  2. from modules import lowvram, sd_hijack
  3. checkpoint_info = checkpoint_info or select_checkpoint()
  4. if model_data.sd_model:
  5. sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
  6. model_data.sd_model = None
  7. gc.collect()
  8. devices.torch_gc()
  9. do_inpainting_hijack()
  10. timer = Timer()
  11. if already_loaded_state_dict is not None:
  12. state_dict = already_loaded_state_dict
  13. else:
  14. state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
  15. checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
  16. clip_is_included_into_sd = sd1_clip_weight in state_dict or sd2_clip_weight in state_dict
  17. timer.record("find config")
  18. sd_config = OmegaConf.load(checkpoint_config)
  19. repair_config(sd_config)
  20. timer.record("load config")
  21. print(f"Creating model from config: {checkpoint_config}")
  22. sd_model = None
  23. try:
  24. with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
  25. sd_model = instantiate_from_config(sd_config.model)
  26. except Exception:
  27. pass
  28. if sd_model is None:
  29. print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
  30. sd_model = instantiate_from_config(sd_config.model)
  31. sd_model.used_config = checkpoint_config
  32. timer.record("create model")
  33. load_model_weights(sd_model, checkpoint_info, state_dict, timer)
  34. if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
  35. lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
  36. else:
  37. sd_model.to(shared.device)
  38. timer.record("move model to device")
  39. sd_hijack.model_hijack.hijack(sd_model)
  40. timer.record("hijack")
  41. sd_model.eval()
  42. model_data.sd_model = sd_model
  43. model_data.was_loaded_at_least_once = True
  44. sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
  45. timer.record("load textual inversion embeddings")
  46. script_callbacks.model_loaded_callback(sd_model)
  47. timer.record("scripts callbacks")
  48. with devices.autocast(), torch.no_grad():
  49. sd_model.cond_stage_model_empty_prompt = sd_model.cond_stage_model([""])
  50. timer.record("calculate empty prompt")
  51. print(f"Model loaded in {timer.summary()}.")
  52. return sd_model

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/笔触狂放9/article/detail/331152?site
推荐阅读
相关标签
  

闽ICP备14008679号