
SD Repainting Application: Loading Multiple ControlNets and LoRAs


You need the diffusers library, transformers > 0.34, and peft installed. The examples below also use controlnet_aux (for the LineartDetector annotator) and Pillow.
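A typical install command for these dependencies might look like this (package names are the standard PyPI ones; pin versions as needed):

pip install diffusers transformers peft controlnet_aux pillow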

Loading a single ControlNet

1.1 Loading ControlNet with the standard SD pipeline

##### Load ControlNet with the StableDiffusionControlNetPipeline
import torch
from PIL import Image
from controlnet_aux import LineartDetector
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
    AutoPipelineForImage2Image,
)

device = 'cuda'

#### Using the lineart model as an example:
def load_lineart():
    # load the ControlNet checkpoint and its annotator
    checkpoint = 'ControlNet-1-1-preview/control_v11p_sd15_lineart'
    processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
    ### load the ControlNet
    controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
    processor.to(device)

    return controlnet, processor

def load_controlnet(controlnet):
    # load Stable Diffusion v1-5 with the ControlNet attached
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        'runwayml/stable-diffusion-v1-5', controlnet=controlnet, torch_dtype=torch.float16
    )
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to(device)
    return pipe

def gen_img(pipe, processor, prompt, image_path):
    #### generate the control image
    image = Image.open(image_path).resize((512, 512))
    control_image = processor(image)
    image = pipe(
        prompt,
        num_inference_steps=40,      # number of denoising steps
        negative_prompt='human,people',
        guidance_scale=4.5,
        image=control_image,
    ).images[0]
    gen_path = './gen_img.png'
    image.save(gen_path)
    return image, gen_path

prompt = 'a dog'
image_path = './image.png'
controlnet, processor = load_lineart()
pipe = load_controlnet(controlnet)
gen_image, gen_path = gen_img(pipe, processor, prompt, image_path)
print('Repainting finished')

1.2 Loading ControlNet with the image-to-image pipeline

import torch
from PIL import Image
from controlnet_aux import LineartDetector
from diffusers import (
    ControlNetModel,
    UniPCMultistepScheduler,
    AutoPipelineForImage2Image,
)

device = 'cuda'

#### Using the lineart model as an example:
def load_lineart():
    # load the ControlNet checkpoint and its annotator
    checkpoint = 'ControlNet-1-1-preview/control_v11p_sd15_lineart'
    processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
    ### load the ControlNet
    controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
    processor.to(device)

    return controlnet, processor

##### only the pipeline loading and the call (init image + control image) differ from 1.1 #####
def load_pipe(controlnet):
    model = 'runwayml/stable-diffusion-v1-5'
    # vae = AutoencoderKL.from_pretrained('./model/orangemix_vae/', torch_dtype=torch.float16)
    pipe = AutoPipelineForImage2Image.from_pretrained(
        model, controlnet=controlnet, torch_dtype=torch.float16, variant="fp16"
    )  # , vae=vae
    pipe.to(device)
    return pipe

def gen_img(pipe, processor, prompt, image_path):
    #### generate the control image
    image = Image.open(image_path).resize((512, 512))
    control_image = processor(image)
    # the img2img ControlNet pipeline takes both the init image and the control image
    result = pipe(
        prompt,
        image=image,
        control_image=control_image,
        num_inference_steps=40,      # number of denoising steps
        negative_prompt='human,people',
        guidance_scale=4.5,
    ).images[0]
    gen_path = './gen_img.png'
    result.save(gen_path)
    return result, gen_path

prompt = 'a dog'
image_path = './image.png'
controlnet, processor = load_lineart()
pipe = load_pipe(controlnet)
gen_image, gen_path = gen_img(pipe, processor, prompt, image_path)
print('Repainting finished')

Loading multiple ControlNets

def load_multi_controlnet():
    controlnet_canny = ControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16, use_safetensors=True
    )
    processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
    controlnet_lineart = ControlNetModel.from_pretrained(
        "ControlNet-1-1-preview/control_v11p_sd15_lineart", torch_dtype=torch.float16
    )
    return controlnet_canny, processor, controlnet_lineart

def load_pipe(controlnet_canny, controlnet_lineart):
    # pass the ControlNets as a list to combine them
    controlnets = [controlnet_canny, controlnet_lineart]
    model = 'runwayml/stable-diffusion-v1-5'
    pipe = AutoPipelineForImage2Image.from_pretrained(
        model, controlnet=controlnets, torch_dtype=torch.float16, variant="fp16"
    )
    return pipe

Loading LoRAs

def load_loras(pipe):
    #### load several LoRAs
    pipe.load_lora_weights('./model/', weight_name="GameIconResearch_cartoon2_Lora.safetensors", adapter_name="lora1")
    pipe.load_lora_weights('./model/', weight_name="animeLineartStyle_v20Offset.safetensors", adapter_name="lora2")
    pipe.load_lora_weights('./model/', weight_name="GameIconResearch_3d_Lora.safetensors", adapter_name="lora3")
    # adapter_weights sets each LoRA's strength; activate one or several LoRAs as needed
    pipe.set_adapters(["lora1", "lora2", "lora3"], adapter_weights=[0.6, 1, 0.5])
    pipe.to(device)
    return pipe
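A hedged usage sketch, assuming the three .safetensors files exist under ./model/ and reusing the img2img pipeline and helper functions from section 1.2 (the prompt is only illustrative; the actual trigger words depend on each LoRA):

# Usage sketch: attach the LoRAs to an existing pipeline, then generate as usual.
controlnet, processor = load_lineart()
pipe = load_pipe(controlnet)          # img2img ControlNet pipeline from 1.2
pipe = load_loras(pipe)

init_image = Image.open('./image.png').resize((512, 512))
control_image = processor(init_image)

image = pipe(
    'a dog, game icon',               # illustrative prompt; adjust to the LoRA's trigger words
    image=init_image,
    control_image=control_image,
    num_inference_steps=40,
    guidance_scale=4.5,
).images[0]
image.save('./gen_lora.png')

# Remove the LoRAs afterwards to get the base model back
pipe.unload_lora_weights()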