
Stable Diffusion | Gradio Interface Design and webUI API Calls


This article builds an interactive Gradio interface, similar to the original webUI, on top of the webUI API. It supports txt2img/img2img (SD 1.x, SD 2.x, SDXL), Embeddings, Lora, X/Y/Z Plot, ADetailer, ControlNet, upscaling (Extras), and image metadata reading (PNG Info).
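
Everything in the interface rests on one pattern: the webUI is started with the `--api` flag, generation parameters are posted as JSON to endpoints such as `/sdapi/v1/txt2img`, and the resulting images come back base64-encoded. Before walking through the full interface code, here is a minimal sketch of that pattern (the address, prompt and sizes below are placeholder values):

```python
# Minimal sketch of a direct webUI API call (assumes webUI was launched with --api).
import base64
import io

import requests
from PIL import Image

url = "http://127.0.0.1:7860"  # default webUI address; change it to match your setup
payload = {
    "prompt": "1girl, best quality",      # placeholder prompt
    "negative_prompt": "lowres, blurry",  # placeholder negative prompt
    "steps": 20,
    "width": 512,
    "height": 768,
    "cfg_scale": 7,
}
response = requests.post(f"{url}/sdapi/v1/txt2img", json=payload).json()
# Each returned image is a base64-encoded PNG string.
image = Image.open(io.BytesIO(base64.b64decode(response["images"][0])))
image.save("txt2img_api_test.png")
```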

1. Online Demo

The code from this article has been deployed to the Baidu PaddlePaddle AI Studio platform, so you can try the original Stable Diffusion ComfyUI/webUI interfaces as well as the custom Gradio interface online.

Project link: Stable Diffusion webUI online demo

2. Custom Gradio Interface Screenshots

txt2img interface:

ADetailer settings interface:

ControlNet settings interface:

X/Y/Z Plot settings interface:

img2img interface:

Upscaling (Extras) interface:

PNG Info (image metadata) interface:

3. Gradio Interface Design and webUI API Calls
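
The complete script is listed below. At startup it queries the webUI API for the available samplers, schedulers, upscalers, checkpoints, VAEs, embeddings, Loras and installed extensions, then uses Gradio Blocks to assemble the txt2img, img2img, Extras and PNG Info tabs. Generation posts to `/sdapi/v1/txt2img` or `/sdapi/v1/img2img`, with ADetailer and ControlNet parameters attached through `alwayson_scripts` and the X/Y/Z Plot script enabled through `script_name`/`script_args`.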

```python
import base64
import datetime
import io
import os
import re
import subprocess

import gradio as gr
import requests
from PIL import Image, PngImagePlugin

design_mode = 1
save_images = "Yes"
url = "http://127.0.0.1:7860"
# When design_mode == 0, auto-detect the webUI port from netstat instead of assuming 7860.
if design_mode == 0:
    cmd = "netstat -tulnp"
    netstat_output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout.splitlines()
    for i in netstat_output:
        if "stable-diffus" in i:
            port = int(re.findall(r'\d+', i)[6])
            url = f"http://127.0.0.1:{port}"
output_dir = os.getcwd() + "/output/" + datetime.date.today().strftime("%Y-%m-%d")
os.makedirs(output_dir, exist_ok=True)
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

default = {
    "prompt": "(best quality:1), (high quality:1), detailed/(extreme, highly, ultra/), realistic, 1girl/(beautiful, delicate, perfect/)",
    "negative_prompt": "(worst quality:1), (low quality:1), (normal quality:1), lowres, signature, blurry, watermark, duplicate, bad link, plump, bad anatomy, extra arms, extra digits, missing finger, bad hands, bad feet, deformed, error, mutation, text",
    "clip_skip": 1,
    "width": 512,
    "height": 768,
    "size_step": 64,
    "steps": 20,
    "cfg": 7,
    "ad_nums": 2,
    "ad_model": ["face_yolov8n.pt", "hand_yolov8n.pt"],
    "cn_nums": 3,
    "cn_type": "Canny",
    "gallery_height": 600,
    "lora_weight": 0.8,
    "hidden_models": ["stable_cascade_stage_c", "stable_cascade_stage_b", "svd_xt_1_1", "control_v11p_sd15_canny", "control_v11f1p_sd15_depth", "control_v11p_sd15_openpose"]
}

# Query the webUI API for the resources available on this installation.
samplers = []
response = requests.get(url=f"{url}/sdapi/v1/samplers").json()
for i in range(len(response)):
    samplers.append(response[i]["name"])
schedulers = []
response = requests.get(url=f"{url}/sdapi/v1/schedulers").json()
for i in range(len(response)):
    schedulers.append(response[i]["label"])
upscalers = []
response = requests.get(url=f"{url}/sdapi/v1/upscalers").json()
for i in range(len(response)):
    upscalers.append(response[i]["name"])
sd_models = []
sd_models_list = {}
response = requests.get(url=f"{url}/sdapi/v1/sd-models").json()
for i in range(len(response)):
    path, sd_model = os.path.split(response[i]["title"])
    sd_model_name, sd_model_extension = os.path.splitext(sd_model)
    if not sd_model_name in default["hidden_models"]:
        sd_models.append(sd_model)
        sd_models_list[sd_model] = response[i]["title"]
sd_models = sorted(sd_models)
sd_vaes = ["Automatic", "None"]
response = requests.get(url=f"{url}/sdapi/v1/sd-vae").json()
for i in range(len(response)):
    sd_vaes.append(response[i]["model_name"])
embeddings = []
response = requests.get(url=f"{url}/sdapi/v1/embeddings").json()
for key in response["loaded"]:
    embeddings.append(key)
extensions = []
response = requests.get(url=f"{url}/sdapi/v1/extensions").json()
for i in range(len(response)):
    extensions.append(response[i]["name"])

loras = []
loras_name = {}
loras_activation_text = {}
response = requests.get(url=f"{url}/sdapi/v1/loras").json()
for i in range(len(response)):
    lora_name = response[i]["name"]
    lora_info = requests.get(url=f"{url}/tacapi/v1/lora-info/{lora_name}").json()
    if lora_info and "sd version" in lora_info:
        lora_type = lora_info["sd version"]
        lora_name_type = f"{lora_name} ({lora_type})"
    else:
        lora_name_type = f"{lora_name}"
    loras.append(lora_name_type)
    loras_name[lora_name_type] = lora_name
    if lora_info and "activation text" in lora_info:  # look up the activation text in the Lora's own info
        loras_activation_text[lora_name_type] = lora_info["activation text"]

xyz_args = {}
xyz_plot_types = {}
last_choice = "Size"
response = requests.get(url=f"{url}/sdapi/v1/script-info").json()
for i in range(len(response)):
    if response[i]["name"] == "x/y/z plot":
        if response[i]["is_img2img"] == False:
            xyz_plot_types["txt2img"] = response[i]["args"][0]["choices"]
            choice_index = xyz_plot_types["txt2img"].index(last_choice) + 1
            xyz_plot_types["txt2img"] = xyz_plot_types["txt2img"][:choice_index]
        else:
            xyz_plot_types["img2img"] = response[i]["args"][0]["choices"]
            choice_index = xyz_plot_types["img2img"].index(last_choice) + 1
            xyz_plot_types["img2img"] = xyz_plot_types["img2img"][:choice_index]

if "adetailer" in extensions:
    ad_args = {"txt2img": {}, "img2img": {}}
    ad_skip_img2img = False
    ad_models = ["None"]
    response = requests.get(url=f"{url}/adetailer/v1/ad_model").json()
    for key in response["ad_model"]:
        ad_models.append(key)

if "sd-webui-controlnet" in extensions:
    cn_args = {"txt2img": {}, "img2img": {}}
    cn_types = []
    cn_types_list = {}
    response = requests.get(url=f"{url}/controlnet/control_types").json()
    for key in response["control_types"]:
        cn_types.append(key)
        cn_types_list[key] = response["control_types"][key]
    cn_default_type = default["cn_type"]
    cn_module_list = cn_types_list[cn_default_type]["module_list"]
    cn_model_list = cn_types_list[cn_default_type]["model_list"]
    cn_default_option = cn_types_list[cn_default_type]["default_option"]
    cn_default_model = cn_types_list[cn_default_type]["default_model"]

def save_image(image, part1, part2):
    counter = 1
    image_name = f"{part1}-{part2}-{counter}.png"
    while os.path.exists(os.path.join(output_dir, image_name)):
        counter += 1
        image_name = f"{part1}-{part2}-{counter}.png"
    image_path = os.path.join(output_dir, image_name)
    image_metadata = PngImagePlugin.PngInfo()
    for key, value in image.info.items():
        if isinstance(key, str) and isinstance(value, str):
            image_metadata.add_text(key, value)
    image.save(image_path, format="PNG", pnginfo=image_metadata)

def pil_to_base64(image_pil):
    buffer = io.BytesIO()
    image_pil.save(buffer, format="png")
    image_buffer = buffer.getbuffer()
    image_base64 = base64.b64encode(image_buffer).decode("utf-8")
    return image_base64

def base64_to_pil(image_base64):
    image_binary = base64.b64decode(image_base64)
    image_pil = Image.open(io.BytesIO(image_binary))
    return image_pil

def format_prompt(prompt):
    prompt = re.sub(r"\s+,", ",", prompt)
    prompt = re.sub(r"\s+", " ", prompt)
    prompt = re.sub(",,+", ",", prompt)
    prompt = re.sub(",", ", ", prompt)
    prompt = re.sub(r"\s+", " ", prompt)
    prompt = re.sub(r"^,", "", prompt)
    prompt = re.sub(r"^ ", "", prompt)
    prompt = re.sub(r" $", "", prompt)
    prompt = re.sub(r",$", "", prompt)
    prompt = re.sub(": ", ":", prompt)
    return prompt

def post_interrupt():
    global interrupt
    interrupt = True
    requests.post(url=f"{url}/sdapi/v1/interrupt").json()

def gr_update_visible(visible):
    return gr.update(visible=visible)

def ordinal(n: int) -> str:
    d = {1: "st", 2: "nd", 3: "rd"}
    return str(n) + ("th" if 11 <= n % 100 <= 13 else d.get(n % 10, "th"))

def add_lora(prompt, lora):
    lora_weight = default["lora_weight"]
    prompt = re.sub(r"<[^<>]+>", "", prompt)
    for elem in loras_activation_text:
        prompt = re.sub(loras_activation_text[elem], "", prompt)
    prompt = format_prompt(prompt)
    for elem in lora:
        lora_name = loras_name[elem]
        if elem in loras_activation_text:
            lora_activation_text = loras_activation_text[elem]
        else:
            lora_activation_text = ""
        if lora_activation_text == "":
            prompt = f"{prompt}, <lora:{lora_name}:{lora_weight}>"
        else:
            prompt = f"{prompt}, <lora:{lora_name}:{lora_weight}> {lora_activation_text}"
    return prompt

def add_embedding(negative_prompt, embedding):
    for elem in embeddings:
        negative_prompt = re.sub(f"{elem},", "", negative_prompt)
    negative_prompt = format_prompt(negative_prompt)
    for elem in embedding[::-1]:
        negative_prompt = f"{elem}, {negative_prompt}"
    return negative_prompt

def add_xyz_plot(payload, gen_type):
    global xyz_args
    if gen_type in xyz_args:
        payload["script_name"] = "X/Y/Z plot"
        payload["script_args"] = xyz_args[gen_type]
    return payload

def xyz_update_args(*args):
    gen_type, enable_xyz_plot, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode = args
    global xyz_args
    x_type = xyz_plot_types[gen_type].index(x_type)
    y_type = xyz_plot_types[gen_type].index(y_type)
    z_type = xyz_plot_types[gen_type].index(z_type)
    args = [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode]
    if enable_xyz_plot == True:
        xyz_args[gen_type] = args
    else:
        xyz_args.pop(gen_type, None)  # avoid a KeyError when the plot was never enabled

def xyz_update_choices(xyz_type):
    choices = []
    if xyz_type == "Checkpoint name":
        choices = sd_models
    if xyz_type == "VAE":
        choices = sd_vaes
    if xyz_type == "Sampler":
        choices = samplers
    if xyz_type == "Schedule type":
        choices = schedulers
    if xyz_type == "Hires sampler":
        choices = samplers
    if xyz_type == "Hires upscaler":
        choices = upscalers
    if xyz_type == "Always discard next-to-last sigma":
        choices = ["False", "True"]
    if xyz_type == "SGM noise multiplier":
        choices = ["False", "True"]
    if xyz_type == "Refiner checkpoint":
        choices = sd_models
    if xyz_type == "RNG source":
        choices = ["GPU", "CPU", "NV"]
    if xyz_type == "FP8 mode":
        choices = ["Disable", "Enable for SDXL", "Enable"]
    if choices == []:
        return gr.update(visible=True, value=None), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True, choices=choices)

def xyz_blocks(gen_type):
    with gr.Blocks() as demo:
        with gr.Row():
            xyz_gen_type = gr.Textbox(visible=False, value=gen_type)
            enable_xyz_plot = gr.Checkbox(label="Enable")
        with gr.Row():
            x_type = gr.Dropdown(xyz_plot_types[gen_type], label="X type", value=xyz_plot_types[gen_type][1])
            x_values = gr.Textbox(label="X values", lines=1)
            x_values_dropdown = gr.Dropdown(label="X values", visible=False, multiselect=True, interactive=True)
        with gr.Row():
            y_type = gr.Dropdown(xyz_plot_types[gen_type], label="Y type", value=xyz_plot_types[gen_type][0])
            y_values = gr.Textbox(label="Y values", lines=1)
            y_values_dropdown = gr.Dropdown(label="Y values", visible=False, multiselect=True, interactive=True)
        with gr.Row():
            z_type = gr.Dropdown(xyz_plot_types[gen_type], label="Z type", value=xyz_plot_types[gen_type][0])
            z_values = gr.Textbox(label="Z values", lines=1)
            z_values_dropdown = gr.Dropdown(label="Z values", visible=False, multiselect=True, interactive=True)
        with gr.Row():
            with gr.Column():
                draw_legend = gr.Checkbox(label='Draw legend', value=True)
                no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
                vary_seeds_x = gr.Checkbox(label='Vary seeds for X', value=False)
                vary_seeds_y = gr.Checkbox(label='Vary seeds for Y', value=False)
                vary_seeds_z = gr.Checkbox(label='Vary seeds for Z', value=False)
            with gr.Column():
                include_lone_images = gr.Checkbox(label='Include Sub Images', value=True)
                include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False)
                csv_mode = gr.Checkbox(label='Use text inputs instead of dropdowns', value=False)
                margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2)
        x_type.change(fn=xyz_update_choices, inputs=x_type, outputs=[x_values, x_values_dropdown])
        y_type.change(fn=xyz_update_choices, inputs=y_type, outputs=[y_values, y_values_dropdown])
        z_type.change(fn=xyz_update_choices, inputs=z_type, outputs=[z_values, z_values_dropdown])
        xyz_inputs = [xyz_gen_type, enable_xyz_plot, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode]
        for gr_block in xyz_inputs:
            if type(gr_block) is gr.components.slider.Slider:
                gr_block.release(fn=xyz_update_args, inputs=xyz_inputs, outputs=None)
            else:
                gr_block.change(fn=xyz_update_args, inputs=xyz_inputs, outputs=None)
    return demo

def add_adetailer(payload, gen_type):
    global ad_args, ad_skip_img2img
    args = ad_args[gen_type]
    args = dict(sorted(args.items(), key=lambda x: x[0]))
    payload["alwayson_scripts"]["adetailer"] = {"args": []}
    if args == {}:
        return payload
    if gen_type == "img2img":
        payload["alwayson_scripts"]["adetailer"]["args"] = [True, ad_skip_img2img]
    else:
        payload["alwayson_scripts"]["adetailer"]["args"] = [True, False]
    for i in args:
        payload["alwayson_scripts"]["adetailer"]["args"].append(args[i])
    return payload

def ad_update_args(*args):
    if "sd-webui-controlnet" in extensions:
        ad_gen_type, ad_num, enable_ad, ad_model, ad_prompt, ad_negative_prompt, ad_confidence, ad_mask_min_ratio, ad_mask_k_largest, ad_mask_max_ratio, ad_x_offset, ad_y_offset, ad_dilate_erode, ad_mask_merge_invert, ad_mask_blur, ad_denoising_strength, ad_inpaint_only_masked, ad_use_inpaint_width_height, ad_inpaint_only_masked_padding, ad_inpaint_width, ad_inpaint_height, ad_use_steps, ad_use_cfg_scale, ad_steps, ad_cfg_scale, ad_use_checkpoint, ad_use_vae, ad_checkpoint, ad_vae, ad_use_sampler, ad_sampler, ad_scheduler, ad_use_noise_multiplier, ad_use_clip_skip, ad_noise_multiplier, ad_clip_skip, ad_restore_face, ad_controlnet_model, ad_controlnet_module, ad_controlnet_weight, ad_controlnet_guidance_start, ad_controlnet_guidance_end = args
    else:
        ad_gen_type, ad_num, enable_ad, ad_model, ad_prompt, ad_negative_prompt, ad_confidence, ad_mask_min_ratio, ad_mask_k_largest, ad_mask_max_ratio, ad_x_offset, ad_y_offset, ad_dilate_erode, ad_mask_merge_invert, ad_mask_blur, ad_denoising_strength, ad_inpaint_only_masked, ad_use_inpaint_width_height, ad_inpaint_only_masked_padding, ad_inpaint_width, ad_inpaint_height, ad_use_steps, ad_use_cfg_scale, ad_steps, ad_cfg_scale, ad_use_checkpoint, ad_use_vae, ad_checkpoint, ad_vae, ad_use_sampler, ad_sampler, ad_scheduler, ad_use_noise_multiplier, ad_use_clip_skip, ad_noise_multiplier, ad_clip_skip, ad_restore_face = args
    global ad_args
    args = {
        "ad_model": ad_model,
        "ad_model_classes": "",
        "ad_prompt": ad_prompt,
        "ad_negative_prompt": ad_negative_prompt,
        "ad_confidence": ad_confidence,
        "ad_mask_k_largest": ad_mask_k_largest,
        "ad_mask_min_ratio": ad_mask_min_ratio,
        "ad_mask_max_ratio": ad_mask_max_ratio,
        "ad_dilate_erode": ad_dilate_erode,
        "ad_x_offset": ad_x_offset,
        "ad_y_offset": ad_y_offset,
        "ad_mask_merge_invert": ad_mask_merge_invert,
        "ad_mask_blur": ad_mask_blur,
        "ad_denoising_strength": ad_denoising_strength,
        "ad_inpaint_only_masked": ad_inpaint_only_masked,
        "ad_inpaint_only_masked_padding": ad_inpaint_only_masked_padding,
        "ad_use_inpaint_width_height": ad_use_inpaint_width_height,
        "ad_inpaint_width": ad_inpaint_width,
        "ad_inpaint_height": ad_inpaint_height,
        "ad_use_steps": ad_use_steps,
        "ad_steps": ad_steps,
        "ad_use_cfg_scale": ad_use_cfg_scale,
        "ad_cfg_scale": ad_cfg_scale,
        "ad_use_checkpoint": ad_use_checkpoint,
        "ad_checkpoint": ad_checkpoint,
        "ad_use_vae": ad_use_vae,
        "ad_vae": ad_vae,
        "ad_use_sampler": ad_use_sampler,
        "ad_sampler": ad_sampler,
        "ad_scheduler": ad_scheduler,
        "ad_use_noise_multiplier": ad_use_noise_multiplier,
        "ad_noise_multiplier": ad_noise_multiplier,
        "ad_use_clip_skip": ad_use_clip_skip,
        "ad_clip_skip": ad_clip_skip,
        "ad_restore_face": ad_restore_face,
    }
    if "sd-webui-controlnet" in extensions:
        args["ad_controlnet_model"] = ad_controlnet_model
        args["ad_controlnet_module"] = ad_controlnet_module
        args["ad_controlnet_weight"] = ad_controlnet_weight
        args["ad_controlnet_guidance_start"] = ad_controlnet_guidance_start
        args["ad_controlnet_guidance_end"] = ad_controlnet_guidance_end
    if enable_ad == True:
        ad_args[ad_gen_type][ad_num] = args
    else:
        ad_args[ad_gen_type].pop(ad_num, None)  # avoid a KeyError when this unit was never enabled

def ad_update_cn_module_choices(ad_controlnet_model):
    if ad_controlnet_model == "control_v11f1p_sd15_depth [1a8eb83c]":
        return gr.update(choices=["depth_midas", "depth_hand_refiner"], visible=True, value="depth_midas")
    if ad_controlnet_model == "control_v11p_sd15_inpaint [dfe64acb]":
        return gr.update(choices=["inpaint_global_harmonious", "inpaint_only", "inpaint_only+lama"], visible=True, value="inpaint_global_harmonious")
    if ad_controlnet_model == "control_v11p_sd15_lineart [2c3004a6]":
        return gr.update(choices=["lineart_coarse", "lineart_realistic", "lineart_anime", "lineart_anime_denoise"], visible=True, value="lineart_coarse")
    if ad_controlnet_model == "control_v11p_sd15_openpose [52e0ea54]":
        return gr.update(choices=["openpose_full", "dw_openpose_full"], visible=True, value="openpose_full")
    if ad_controlnet_model == "control_v11p_sd15_scribble [46a6fcd7]":
        return gr.update(choices=["t2ia_sketch_pidi"], visible=True, value="t2ia_sketch_pidi")
    if ad_controlnet_model == "control_v11p_sd15s2_lineart_anime [19a26aa8]":
        return gr.update(choices=["lineart_coarse", "lineart_realistic", "lineart_anime", "lineart_anime_denoise"], visible=True, value="lineart_coarse")
    return gr.update(visible=False)

def ad_update_skip_img2img(arg):
    global ad_skip_img2img
    ad_skip_img2img = arg

def ad_blocks(i, gen_type):
    with gr.Blocks() as demo:
        ad_gen_type = gr.Textbox(visible=False, value=gen_type)
        ad_num = gr.Textbox(visible=False, value=i)
        enable_ad = gr.Checkbox(label="Enable")
        ad_model = gr.Dropdown(ad_models, label="ADetailer model", value=default["ad_model"][i])
        ad_prompt = gr.Textbox(show_label=False, placeholder="ADetailer prompt" + "\nIf blank, the main prompt is used.", lines=3)
        ad_negative_prompt = gr.Textbox(show_label=False, placeholder="ADetailer negative prompt" + "\nIf blank, the main negative prompt is used.", lines=3)
        with gr.Tab("Detection"):
            with gr.Row():
                ad_confidence = gr.Slider(label="Detection model confidence threshold", minimum=0, maximum=1, step=0.01, value=0.3)
                ad_mask_min_ratio = gr.Slider(label="Mask min area ratio", minimum=0, maximum=1, step=0.001, value=0)
            with gr.Row():
                ad_mask_k_largest = gr.Slider(label="Mask only the top k largest (0 to disable)", minimum=0, maximum=10, step=1, value=0)
                ad_mask_max_ratio = gr.Slider(label="Mask max area ratio", minimum=0, maximum=1, step=0.001, value=1)
        with gr.Tab("Mask Preprocessing"):
            with gr.Row():
                ad_x_offset = gr.Slider(label="Mask x(→) offset", minimum=-200, maximum=200, step=1, value=0)
                ad_y_offset = gr.Slider(label="Mask y(↑) offset", minimum=-200, maximum=200, step=1, value=0)
                ad_dilate_erode = gr.Slider(label="Mask erosion (-) / dilation (+)", minimum=-128, maximum=128, step=4, value=4)
            ad_mask_merge_invert = gr.Radio(["None", "Merge", "Merge and Invert"], label="Mask merge mode", value="None")
        with gr.Tab("Inpainting"):
            with gr.Row():
                ad_mask_blur = gr.Slider(label="Inpaint mask blur", minimum=0, maximum=64, step=1, value=4)
                ad_denoising_strength = gr.Slider(label="Inpaint denoising strength", minimum=0, maximum=1, step=0.01, value=0.4)
            with gr.Row():
                ad_inpaint_only_masked = gr.Checkbox(label="Inpaint only masked", value=True)
                ad_use_inpaint_width_height = gr.Checkbox(label="Use separate width/height")
            with gr.Row():
                ad_inpaint_only_masked_padding = gr.Slider(label="Inpaint only masked padding, pixels", minimum=0, maximum=256, step=4, value=32)
                with gr.Column():
                    ad_inpaint_width = gr.Slider(label="inpaint width", minimum=64, maximum=2048, step=default["size_step"], value=512)
                    ad_inpaint_height = gr.Slider(label="inpaint height", minimum=64, maximum=2048, step=default["size_step"], value=512)
            with gr.Row():
                ad_use_steps = gr.Checkbox(label="Use separate steps")
                ad_use_cfg_scale = gr.Checkbox(label="Use separate CFG scale")
            with gr.Row():
                ad_steps = gr.Slider(label="ADetailer steps", minimum=1, maximum=150, step=1, value=28)
                ad_cfg_scale = gr.Slider(label="ADetailer CFG scale", minimum=0, maximum=30, step=0.5, value=7)
            with gr.Row():
                ad_use_checkpoint = gr.Checkbox(label="Use separate checkpoint")
                ad_use_vae = gr.Checkbox(label="Use separate VAE")
            with gr.Row():
                ckpts = ["Use same checkpoint"]
                for model in sd_models:
                    ckpts.append(model)
                ad_checkpoint = gr.Dropdown(ckpts, label="ADetailer checkpoint", value=ckpts[0])
                vaes = ["Use same VAE"]
                for vae in sd_vaes:
                    vaes.append(vae)
                ad_vae = gr.Dropdown(vaes, label="ADetailer VAE", value=vaes[0])
            ad_use_sampler = gr.Checkbox(label="Use separate sampler")
            with gr.Row():
                ad_sampler = gr.Dropdown(samplers, label="ADetailer sampler", value=samplers[0])
                scheduler_names = ["Use same scheduler"]
                for scheduler in schedulers:
                    scheduler_names.append(scheduler)
                ad_scheduler = gr.Dropdown(scheduler_names, label="ADetailer scheduler", value=scheduler_names[0])
            with gr.Row():
                ad_use_noise_multiplier = gr.Checkbox(label="Use separate noise multiplier")
                ad_use_clip_skip = gr.Checkbox(label="Use separate CLIP skip")
            with gr.Row():
                ad_noise_multiplier = gr.Slider(label="Noise multiplier for img2img", minimum=0.5, maximum=1.5, step=0.01, value=1)
                ad_clip_skip = gr.Slider(label="ADetailer CLIP skip", minimum=1, maximum=12, step=1, value=1)
            ad_restore_face = gr.Checkbox(label="Restore faces after ADetailer")
        if "sd-webui-controlnet" in extensions:
            with gr.Tab("ControlNet"):
                with gr.Row():
                    ad_cn_models = ["None", "Passthrough", "control_v11f1p_sd15_depth [1a8eb83c]", "control_v11p_sd15_inpaint [dfe64acb]", "control_v11p_sd15_lineart [2c3004a6]", "control_v11p_sd15_openpose [52e0ea54]", "control_v11p_sd15_scribble [46a6fcd7]", "control_v11p_sd15s2_lineart_anime [19a26aa8]"]
                    ad_controlnet_model = gr.Dropdown(ad_cn_models, label="ControlNet model", value="None")
                    ad_controlnet_module = gr.Dropdown(["None"], label="ControlNet module", value="None", visible=False)
                    ad_controlnet_model.change(fn=ad_update_cn_module_choices, inputs=ad_controlnet_model, outputs=ad_controlnet_module)
                with gr.Row():
                    ad_controlnet_weight = gr.Slider(label="Control Weight", minimum=0, maximum=1, step=0.01, value=1)
                    ad_controlnet_guidance_start = gr.Slider(label="Starting Control Step", minimum=0, maximum=1, step=0.01, value=0)
                    ad_controlnet_guidance_end = gr.Slider(label="Ending Control Step", minimum=0, maximum=1, step=0.01, value=1)
        if "sd-webui-controlnet" in extensions:
            ad_inputs = [ad_gen_type, ad_num, enable_ad, ad_model, ad_prompt, ad_negative_prompt, ad_confidence, ad_mask_min_ratio, ad_mask_k_largest, ad_mask_max_ratio, ad_x_offset, ad_y_offset, ad_dilate_erode, ad_mask_merge_invert, ad_mask_blur, ad_denoising_strength, ad_inpaint_only_masked, ad_use_inpaint_width_height, ad_inpaint_only_masked_padding, ad_inpaint_width, ad_inpaint_height, ad_use_steps, ad_use_cfg_scale, ad_steps, ad_cfg_scale, ad_use_checkpoint, ad_use_vae, ad_checkpoint, ad_vae, ad_use_sampler, ad_sampler, ad_scheduler, ad_use_noise_multiplier, ad_use_clip_skip, ad_noise_multiplier, ad_clip_skip, ad_restore_face, ad_controlnet_model, ad_controlnet_module, ad_controlnet_weight, ad_controlnet_guidance_start, ad_controlnet_guidance_end]
        else:
            ad_inputs = [ad_gen_type, ad_num, enable_ad, ad_model, ad_prompt, ad_negative_prompt, ad_confidence, ad_mask_min_ratio, ad_mask_k_largest, ad_mask_max_ratio, ad_x_offset, ad_y_offset, ad_dilate_erode, ad_mask_merge_invert, ad_mask_blur, ad_denoising_strength, ad_inpaint_only_masked, ad_use_inpaint_width_height, ad_inpaint_only_masked_padding, ad_inpaint_width, ad_inpaint_height, ad_use_steps, ad_use_cfg_scale, ad_steps, ad_cfg_scale, ad_use_checkpoint, ad_use_vae, ad_checkpoint, ad_vae, ad_use_sampler, ad_sampler, ad_scheduler, ad_use_noise_multiplier, ad_use_clip_skip, ad_noise_multiplier, ad_clip_skip, ad_restore_face]
        for gr_block in ad_inputs:
            if type(gr_block) is gr.components.slider.Slider:
                gr_block.release(fn=ad_update_args, inputs=ad_inputs, outputs=None)
            else:
                gr_block.change(fn=ad_update_args, inputs=ad_inputs, outputs=None)
    return demo

def add_controlnet(payload, gen_type):
    global cn_args
    args = cn_args[gen_type]
    args = dict(sorted(args.items(), key=lambda x: x[0]))
    payload["alwayson_scripts"]["controlnet"] = {"args": []}
    if args == {}:
        return payload
    for i in args:
        payload["alwayson_scripts"]["controlnet"]["args"].append(args[i])
    return payload

def cn_preprocess(cn_module, cn_input_image):
    if cn_input_image is None:
        return None
    cn_input_image = pil_to_base64(cn_input_image)
    payload = {
        "controlnet_module": cn_module,
        "controlnet_input_images": [cn_input_image]
    }
    response = requests.post(url=f"{url}/controlnet/detect", json=payload)
    images_base64 = response.json()["images"][0]
    image_pil = base64_to_pil(images_base64)
    if save_images == "Yes":
        save_image(image_pil, "ControlNet", "detect")
    return image_pil

def cn_update_args(*args):
    cn_gen_type, cn_num, enable_cn, enable_low_vram, enable_pixel_perfect, cn_module, cn_model, cn_input_image, cn_mask, cn_weight, cn_guidance_start, cn_guidance_end, cn_resolution, cn_control_mode, cn_resize_mode = args
    global cn_args
    if not cn_input_image is None:
        cn_input_image = pil_to_base64(cn_input_image)
    if not cn_mask is None:
        cn_mask = pil_to_base64(cn_mask)
    args = {
        "input_image": cn_input_image,
        "module": cn_module,
        "model": cn_model,
        "low_vram": enable_low_vram,
        "pixel_perfect": enable_pixel_perfect,
        "mask": cn_mask,
        "weight": cn_weight,
        "guidance_start": cn_guidance_start,
        "guidance_end": cn_guidance_end,
        "processor_res": cn_resolution,
        "control_mode": cn_control_mode,
        "resize_mode": cn_resize_mode
    }
    if enable_cn == True:
        cn_args[cn_gen_type][cn_num] = args
    else:
        cn_args[cn_gen_type].pop(cn_num, None)  # avoid a KeyError when this unit was never enabled

def cn_update_choices(cn_type):
    module_list = cn_types_list[cn_type]["module_list"]
    model_list = cn_types_list[cn_type]["model_list"]
    default_option = cn_types_list[cn_type]["default_option"]
    default_model = cn_types_list[cn_type]["default_model"]
    return gr.update(choices=module_list, value=default_option), gr.update(choices=model_list, value=default_model)

def cn_blocks(i, gen_type):
    with gr.Blocks() as demo:
        with gr.Row():
            cn_gen_type = gr.Textbox(visible=False, value=gen_type)
            cn_num = gr.Textbox(visible=False, value=i)
            enable_cn = gr.Checkbox(label="Enable")
            enable_low_vram = gr.Checkbox(label="Low VRAM")
            enable_pixel_perfect = gr.Checkbox(label="Pixel Perfect")
            enable_mask_upload = gr.Checkbox(label="Effective Region Mask")
        with gr.Row():
            cn_type = gr.Dropdown(cn_types, label="ControlNet type", value=cn_default_type)
            cn_btn = gr.Button("Preprocess | 预处理", elem_id="button")
        with gr.Row():
            cn_module = gr.Dropdown(cn_module_list, label="ControlNet module", value=cn_default_option)
            cn_model = gr.Dropdown(cn_model_list, label="ControlNet model", value=cn_default_model)
        with gr.Row():
            cn_input_image = gr.Image(type="pil")
            cn_detect_image = gr.Image(label="Preprocessor Preview")
            cn_mask = gr.Image(label="Effective Region Mask", interactive=True, visible=False)
        with gr.Row():
            cn_weight = gr.Slider(label="Control Weight", minimum=0, maximum=2, step=0.05, value=1)
            cn_guidance_start = gr.Slider(label="Starting Control Step", minimum=0, maximum=1, step=0.01, value=0)
            cn_guidance_end = gr.Slider(label="Ending Control Step", minimum=0, maximum=1, step=0.01, value=1)
        cn_resolution = gr.Slider(label="Resolution", minimum=64, maximum=2048, step=default["size_step"], value=512)
        cn_control_mode = gr.Radio(["Balanced", "My prompt is more important", "ControlNet is more important"], label="Control Mode", value="Balanced")
        cn_resize_mode = gr.Radio(["Just Resize", "Crop and Resize", "Resize and Fill"], label="Resize Mode", value="Crop and Resize")
        enable_mask_upload.change(fn=gr_update_visible, inputs=enable_mask_upload, outputs=cn_mask)
        cn_type.change(fn=cn_update_choices, inputs=cn_type, outputs=[cn_module, cn_model])
        cn_btn.click(fn=cn_preprocess, inputs=[cn_module, cn_input_image], outputs=cn_detect_image)
        cn_inputs = [cn_gen_type, cn_num, enable_cn, enable_low_vram, enable_pixel_perfect, cn_module, cn_model, cn_input_image, cn_mask, cn_weight, cn_guidance_start, cn_guidance_end, cn_resolution, cn_control_mode, cn_resize_mode]
        for gr_block in cn_inputs:
            if type(gr_block) is gr.components.slider.Slider:
                gr_block.release(fn=cn_update_args, inputs=cn_inputs, outputs=None)
            else:
                gr_block.change(fn=cn_update_args, inputs=cn_inputs, outputs=None)
    return demo

def generate(input_image, sd_model, sd_vae, sampler_name, scheduler, clip_skip, steps, width, batch_size, height, batch_count, cfg_scale, randn_source, seed, denoising_strength, prompt, negative_prompt, progress=gr.Progress()):
    global interrupt, xyz_args
    interrupt = False
    if denoising_strength >= 0:
        gen_type = "img2img"
        if input_image is None:
            return None, None, None
    else:
        gen_type = "txt2img"
    progress(0, desc=f"Loading {sd_model}")
    # Switch checkpoint/VAE/CLIP skip/RNG source through the options endpoint before generating.
    payload = {
        "sd_model_checkpoint": sd_models_list[sd_model],
        "sd_vae": sd_vae,
        "CLIP_stop_at_last_layers": clip_skip,
        "randn_source": randn_source
    }
    requests.post(url=f"{url}/sdapi/v1/options", json=payload)
    if interrupt == True:
        return None, None, None
    progress(0, desc="Processing...")
    images = []
    images_info = []
    if not input_image is None:
        input_image = pil_to_base64(input_image)
    for i in range(batch_count):
        payload = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "batch_size": batch_size,
            "seed": seed,
            "sampler_name": sampler_name,
            "scheduler": scheduler,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "width": width,
            "height": height,
            "init_images": [input_image],
            "denoising_strength": denoising_strength,
            "alwayson_scripts": {}
        }
        if "adetailer" in extensions:
            payload = add_adetailer(payload, gen_type)
        if "sd-webui-controlnet" in extensions:
            payload = add_controlnet(payload, gen_type)
        payload = add_xyz_plot(payload, gen_type)
        # Post to /sdapi/v1/txt2img or /sdapi/v1/img2img; images come back base64-encoded.
        response = requests.post(url=f"{url}/sdapi/v1/{gen_type}", json=payload)
        images_base64 = response.json()["images"]
        for j in range(len(images_base64)):
            image_pil = base64_to_pil(images_base64[j])
            images.append(image_pil)
            image_info = get_png_info(image_pil)
            images_info.append(image_info)
            if image_info == "None":
                if save_images == "Yes":
                    if gen_type in xyz_args:
                        save_image(image_pil, "XYZ_Plot", "grid")
                    else:
                        save_image(image_pil, "ControlNet", "detect")
            else:
                seed = re.findall("Seed: [0-9]+", image_info)[0].split(": ")[-1]
                if save_images == "Yes":
                    save_image(image_pil, sd_model, seed)
        seed = int(seed) + 1
        progress((i+1)/batch_count, desc=f"Batch count: {(i+1)}/{batch_count}")
        if interrupt == True:
            return images, images_info, datetime.datetime.now()
    return images, images_info, datetime.datetime.now()

def gen_clear_geninfo():
    return None

def gen_update_geninfo(images_info):
    if images_info == [] or images_info is None:
        return None
    return images_info[0]

def gen_update_selected_geninfo(images_info, evt: gr.SelectData):
    return images_info[evt.index]

def gen_blocks(gen_type):
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(placeholder="Prompt", show_label=False, value=default["prompt"], lines=3)
                negative_prompt = gr.Textbox(placeholder="Negative prompt", show_label=False, value=default["negative_prompt"], lines=3)
                if gen_type == "txt2img":
                    input_image = gr.Image(visible=False)
                else:
                    input_image = gr.Image(type="pil")
                with gr.Tab("Generation"):
                    with gr.Row():
                        sd_model = gr.Dropdown(sd_models, label="SD Model", value=sd_models[0])
                        sd_vae = gr.Dropdown(sd_vaes, label="SD VAE", value=sd_vaes[0])
                        clip_skip = gr.Slider(minimum=1, maximum=12, step=1, label="Clip skip", value=default["clip_skip"])
                    with gr.Row():
                        sampler_name = gr.Dropdown(samplers, label="Sampling method", value=samplers[0])
                        scheduler = gr.Dropdown(schedulers, label="Schedule type", value=schedulers[0])
                        steps = gr.Slider(minimum=1, maximum=100, step=1, label="Sampling steps", value=default["steps"])
                    with gr.Row():
                        width = gr.Slider(minimum=64, maximum=2048, step=default["size_step"], label="Width", value=default["width"])
                        batch_size = gr.Slider(minimum=1, maximum=8, step=1, label="Batch size", value=1)
                    with gr.Row():
                        height = gr.Slider(minimum=64, maximum=2048, step=default["size_step"], label="Height", value=default["height"])
                        batch_count = gr.Slider(minimum=1, maximum=100, step=1, label="Batch count", value=1)
                    with gr.Row():
                        cfg_scale = gr.Slider(minimum=1, maximum=30, step=0.5, label="CFG Scale", value=default["cfg"])
                        if gen_type == "txt2img":
                            denoising_strength = gr.Slider(minimum=-1, maximum=1, step=1, value=-1, visible=False)
                        else:
                            denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Denoising strength", value=0.7)
                    with gr.Row():
                        randn_source = gr.Dropdown(["CPU", "GPU"], label="RNG", value="CPU")
                        seed = gr.Textbox(label="Seed", value=-1)
                if "adetailer" in extensions:
                    with gr.Tab("ADetailer"):
                        if gen_type == "img2img":
                            with gr.Row():
                                ad_skip_img2img = gr.Checkbox(label="Skip img2img", visible=True)
                                ad_skip_img2img.change(fn=ad_update_skip_img2img, inputs=ad_skip_img2img, outputs=None)
                        for i in range(default["ad_nums"]):
                            with gr.Tab(f"ADetailer {ordinal(i + 1)}"): ad_blocks(i, gen_type)
                if "sd-webui-controlnet" in extensions:
                    with gr.Tab("ControlNet"):
                        for i in range(default["cn_nums"]):
                            with gr.Tab(f"ControlNet Unit {i}"): cn_blocks(i, gen_type)
                if not loras == [] or not embeddings == []:
                    with gr.Tab("Extra Networks"):
                        if not loras == []:
                            lora = gr.Dropdown(loras, label="Lora", multiselect=True, interactive=True)
                            lora.change(fn=add_lora, inputs=[prompt, lora], outputs=prompt)
                        if not embeddings == []:
                            embedding = gr.Dropdown(embeddings, label="Embedding", multiselect=True, interactive=True)
                            embedding.change(fn=add_embedding, inputs=[negative_prompt, embedding], outputs=negative_prompt)
                with gr.Tab("X/Y/Z plot"): xyz_blocks(gen_type)
            with gr.Column():
                with gr.Row():
                    btn = gr.Button("Generate | 生成", elem_id="button")
                    btn2 = gr.Button("Interrupt | 终止")
                gallery = gr.Gallery(preview=True, height=default["gallery_height"])
                image_geninfo = gr.Markdown()
                images_geninfo = gr.State()
                update_geninfo = gr.Textbox(visible=False)
        gen_inputs = [input_image, sd_model, sd_vae, sampler_name, scheduler, clip_skip, steps, width, batch_size, height, batch_count, cfg_scale, randn_source, seed, denoising_strength, prompt, negative_prompt]
        btn.click(fn=gen_clear_geninfo, inputs=None, outputs=image_geninfo)
        btn.click(fn=generate, inputs=gen_inputs, outputs=[gallery, images_geninfo, update_geninfo])
        btn2.click(fn=post_interrupt, inputs=None, outputs=None)
        gallery.select(fn=gen_update_selected_geninfo, inputs=images_geninfo, outputs=image_geninfo)
        update_geninfo.change(fn=gen_update_geninfo, inputs=images_geninfo, outputs=image_geninfo)
    return demo

def extras(input_image, upscaler_1, upscaler_2, upscaling_resize, extras_upscaler_2_visibility, enable_gfpgan, gfpgan_visibility, enable_codeformer, codeformer_visibility, codeformer_weight):
    if input_image is None:
        return None
    input_image = pil_to_base64(input_image)
    if enable_gfpgan == False:
        gfpgan_visibility = 0
    if enable_codeformer == False:
        codeformer_visibility = 0
    payload = {
        "gfpgan_visibility": gfpgan_visibility,
        "codeformer_visibility": codeformer_visibility,
        "codeformer_weight": codeformer_weight,
        "upscaling_resize": upscaling_resize,
        "upscaler_1": upscaler_1,
        "upscaler_2": upscaler_2,
        "extras_upscaler_2_visibility": extras_upscaler_2_visibility,
        "image": input_image
    }
    response = requests.post(url=f"{url}/sdapi/v1/extra-single-image", json=payload)
    images_base64 = response.json()["image"]
    image_pil = base64_to_pil(images_base64)
    if save_images == "Yes":
        save_image(image_pil, "Extras", "image")
    return image_pil

def extras_blocks():
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(type="pil")
                with gr.Row():
                    upscaler_1 = gr.Dropdown(upscalers, label="Upscaler 1", value="R-ESRGAN 4x+")
                    upscaler_2 = gr.Dropdown(upscalers, label="Upscaler 2", value="None")
                with gr.Row():
                    upscaling_resize = gr.Slider(minimum=1, maximum=8, step=0.05, label="Scale by", value=4)
                    extras_upscaler_2_visibility = gr.Slider(minimum=0, maximum=1, step=0.001, label="Upscaler 2 visibility", value=0)
                enable_gfpgan = gr.Checkbox(label="Enable GFPGAN")
                gfpgan_visibility = gr.Slider(minimum=0, maximum=1, step=0.001, label="GFPGAN Visibility", value=1)
                enable_codeformer = gr.Checkbox(label="Enable CodeFormer")
                codeformer_visibility = gr.Slider(minimum=0, maximum=1, step=0.001, label="CodeFormer Visibility", value=1)
                codeformer_weight = gr.Slider(minimum=0, maximum=1, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0)
            with gr.Column():
                with gr.Row():
                    btn = gr.Button("Generate | 生成", elem_id="button")
                    btn2 = gr.Button("Interrupt | 终止")
                extra_image = gr.Image(label="Extras image")
        btn.click(fn=extras, inputs=[input_image, upscaler_1, upscaler_2, upscaling_resize, extras_upscaler_2_visibility, enable_gfpgan, gfpgan_visibility, enable_codeformer, codeformer_visibility, codeformer_weight], outputs=extra_image)
        btn2.click(fn=post_interrupt, inputs=None, outputs=None)
    return demo

def get_png_info(image_pil):
    image_info = []
    if image_pil is None:
        return None
    for key, value in image_pil.info.items():
        image_info.append(value)
    if not image_info == []:
        image_info = image_info[0]
        image_info = re.sub(r"<", r"\<", image_info)  # escape < and > so Markdown does not treat them as HTML
        image_info = re.sub(r">", r"\>", image_info)
        image_info = re.sub(r"\n", "<br>", image_info)
    else:
        image_info = "None"
    return image_info

def png_info_blocks():
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(value=None, type="pil")
            with gr.Column():
                png_info = gr.Markdown()
        input_image.change(fn=get_png_info, inputs=input_image, outputs=png_info)
    return demo

with gr.Blocks(css="#button {background: #FFE1C0; color: #FF453A} .block.padded:not(.gradio-accordion) {padding: 0 !important;} div.form {border-width: 0; box-shadow: none; background: white; gap: 0.5em;}") as demo:
    with gr.Tab("txt2img"): gen_blocks("txt2img")
    with gr.Tab("img2img"): gen_blocks("img2img")
    with gr.Tab("Extras"): extras_blocks()
    with gr.Tab("PNG Info"): png_info_blocks()

demo.queue(concurrency_count=100).launch(inbrowser=True)
```
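
To run the interface, first start Stable Diffusion webUI with the `--api` flag so the `/sdapi/v1/...` endpoints are available, then run this script. By default it connects to `http://127.0.0.1:7860`; with `design_mode = 0` it instead auto-detects the webUI port from `netstat`. Generated images are saved under `output/<date>/` in the working directory.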
