赞
踩
一、下载安装启动comfyui(ComfyUI_windows_portable)
双击run_nvidia_gpu.bat开启命令行
下载workflow_api.json
二、写api接口
1.get_output
- import json
- import requests
- import urllib.request
- import urllib.parse
- import websockets
-
- from demo.utils import image_to_base64
-
# Host:port of the local ComfyUI server (8188 is ComfyUI's default port).
server_address = "127.0.0.1:8188"
def queue_prompt(prompt, client_id):
    """Submit a workflow prompt to the ComfyUI server queue.

    Parameters:
        prompt: dict -- the workflow graph loaded from workflow_api.json.
        client_id: str -- UUID identifying this client on the websocket.

    Returns:
        dict -- the server's JSON response (contains 'prompt_id').

    Raises:
        requests.HTTPError -- on any non-2xx response.
    """
    url = f"http://{server_address}/prompt"
    payload = {"prompt": prompt, "client_id": client_id}
    response = requests.post(url, json=payload)
    # Fix: the original returned the JSON only when status == 200 and fell
    # through raise_for_status() otherwise -- for any other 2xx status
    # (201, 204, ...) raise_for_status() does not raise, so the function
    # silently returned None. Raising first, then returning, covers all cases.
    response.raise_for_status()
    return response.json()
-
def get_image_url(filename, subfolder, folder_type):
    """Build the ComfyUI /view URL that serves a generated image."""
    params = urllib.parse.urlencode(
        {"filename": filename, "subfolder": subfolder, "type": folder_type}
    )
    return f"http://{server_address}/view?{params}"
-
def get_history(prompt_id):
    """Fetch the execution-history record for a queued prompt from the server."""
    url = f"http://{server_address}/history/{prompt_id}"
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read())
-
async def get_outputs(client_id, prompt):
    """Queue a workflow, wait for it to finish via the websocket, then
    collect its outputs from the /history endpoint.

    Parameters:
        client_id: str -- UUID used both when queuing and on the websocket,
            so status messages can be matched to this client.
        prompt: dict -- workflow graph to execute.

    Returns:
        dict with keys:
            "images": list of base64-encoded image strings.
            "tags":   list of tag strings emitted by tagging nodes.
    """
    prompt_id = queue_prompt(prompt, client_id)['prompt_id']
    output_images = []
    output_tags = []
    async with websockets.connect(f"ws://{server_address}/ws?clientId={client_id}") as websocket:
        while True:
            out = await websocket.recv()
            # Binary frames carry preview data; only JSON text frames matter here.
            if isinstance(out, str):
                message = json.loads(out)
                if message['type'] == 'executing':
                    data = message['data']
                    # node == None for our prompt_id signals execution finished.
                    if data['node'] is None and data['prompt_id'] == prompt_id:
                        break
    history = get_history(prompt_id)[prompt_id]
    for node_id, node_output in history['outputs'].items():
        if 'images' in node_output:
            # Images are read back from the local output folder and returned
            # inline as base64 rather than as /view URLs.
            for image in node_output['images']:
                output_images.append(image_to_base64(image['filename']))

        if 'tags' in node_output:
            output_tags.extend(node_output['tags'])

    return {"images": output_images, "tags": output_tags}
2.utils.py
- import json
- import base64
def load_json_template(file_path):
    """Load a JSON workflow template from *file_path* and return the parsed object.

    The file is read as UTF-8 explicitly: workflow files may contain
    non-ASCII text, and the platform-default encoding (e.g. GBK on a
    Chinese-locale Windows machine) would fail to decode them.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)
-
def image_to_base64(filename):
    """Read *filename* from the local ./output directory and return its
    contents base64-encoded as a UTF-8 string.

    Bug fix: the original hard-coded the path and never used the
    *filename* argument, so every call read the same (wrong) file.
    """
    image_path = f"./output/{filename}"
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
3.router.py
- import os
- import random
- import uuid
- import requests
- from demo.utils import load_json_template
- # from api_utils.prompt_loader import load_checkpoint, load_controlnet, load_loras, load_prompt, load_controlnet_webui
- from demo.get_output import get_outputs
- from fastapi import UploadFile, File, Form
- from fastapi.responses import JSONResponse
-
async def process_generateimg(data):
    """Run the text-to-image workflow for one request and return its outputs."""
    client_id = str(uuid.uuid4())
    workflow = load_json_template('demo/workflow_api.json')
    # Randomize the sampler seed (node "3") so identical prompts still differ.
    workflow["3"]["inputs"]["seed"] = random.randrange(10**14, 10**15)
    # Node "6" holds the positive text prompt.
    workflow["6"]["inputs"]["text"] = data.prompt
    return await get_outputs(client_id, workflow)
4.api.py
- from fastapi import FastAPI, HTTPException
- from pydantic import BaseModel
- from fastapi.staticfiles import StaticFiles
- from fastapi.responses import HTMLResponse, JSONResponse
- # import random
- import json
- from demo.router import *
-
- import base64
- from io import BytesIO
- from PIL import Image
-
# FastAPI application instance serving the image-generation API.
app = FastAPI()


# Request body for /generate_img: a single free-text prompt.
class TextToImageModel(BaseModel):
    prompt: str
-
@app.post("/generate_img", tags=["perfume bottle"])
async def generate_img(data: TextToImageModel):
    """Generate images for the given prompt.

    Delegates to process_generateimg, which runs the ComfyUI workflow and
    returns {"images": [...base64...], "tags": [...]}.
    """
    return await process_generateimg(data)
-
# Serve the files under demo/static (index.html and its assets) at /static.
app.mount("/static", StaticFiles(directory="demo/static"), name="static")
-
@app.get("/", response_class=HTMLResponse)
def read_root():
    """Serve the static front-end page at the site root.

    Returns the contents of demo/static/index.html, or a plain
    "File not found" page with status 404 when the file is missing.
    """
    # Literal path instead of os.path.join: this module never imports `os`
    # itself -- the original call only worked through the star import from
    # demo.router. Forward slashes are accepted by open() on all platforms.
    file_path = "demo/static/index.html"
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            return HTMLResponse(content=f.read(), status_code=200)
    except FileNotFoundError:
        return HTMLResponse(content="File not found", status_code=404)
-
- # UPLOAD_DIRECTORY = "./uploaded_videos"
- # if not os.path.exists(UPLOAD_DIRECTORY):
- # os.makedirs(UPLOAD_DIRECTORY)
-
if __name__ == '__main__':
    import uvicorn
    # Development server: listen on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
    # Alternatively run with: uvicorn api:app --reload
5.静态页面
新建static文件夹,在该文件夹下新建index.html
<!DOCTYPE html>
<html>
<head>
    <title>Image Generator</title>
</head>
<body>
    <h1>Image Generator</h1>
    <!-- Prompt form: submission is intercepted by the script below. -->
    <form id="generateForm">
        <label for="prompt">Prompt:</label>
        <input type="text" id="prompt" name="prompt" required>
        <button type="submit">Generate Image</button>
    </form>
    <!-- Generated images are appended here as data: URIs. -->
    <div id="images"></div>

    <script>
        document.getElementById('generateForm').addEventListener('submit', async function(event) {
            // Prevent the browser's default full-page form submission.
            event.preventDefault();
            const prompt = document.getElementById('prompt').value;
            console.log(prompt);

            try {
                // POST the prompt as JSON to the FastAPI endpoint.
                const response = await fetch('/generate_img', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({ prompt: prompt })
                });

                if (!response.ok) {
                    throw new Error('Network response was not ok');
                }

                const data = await response.json();
                console.log(data);
                const imagesDiv = document.getElementById('images');
                imagesDiv.innerHTML = '';
                // Each entry is a base64-encoded image; append it once loaded.
                data.images.forEach(imageBase64 => {
                    const img = document.createElement('img');
                    img.onload = function() {
                        imagesDiv.appendChild(img);
                    };
                    img.src = 'data:image/png;base64,' + imageBase64;
                });
            } catch (error) {
                console.error('Error:', error);
            }
        });

    </script>
</body>
</html>
6.补充COMFYUI安装教程
官网下载地址:https://github.com/comfyanonymous/ComfyUI(Releases 页面提供 Windows 便携版压缩包)
确保将稳定扩散检查点/模型(巨大的 ckpt/safetensors 文件)放入文件中:ComfyUI\models\checkpoints
加载模型 有条件(魔法)的同学可前往C站下载 跳转
此处大模型由 B站UP主秋葉aaaki提供,留下了没用SVIP的
大模型:https://pan.baidu.com/s/1TbA04C3TcOSKUh1Y_9trOg 提取码:aaki
大模型:https://pan.baidu.com/s/1v_RKaBTWkhFYabOvlao7pQ 提取码:aaki
下载完成后将模型放在如下目录 checkpoints 文件夹下
另一个UI如何跟ComfyUI之间共享模型 笔者之前用的是 stable-diffusion-webui ,以此举例将extra_model_paths.yaml.example
文件复制一份,然后通过文本编辑器打开副本文件。
修改配置文件,笔者的controlnet插件是通过Civitai-Helper下载的,所以对应路径也需要修改,改完后保存退出。
将复制出的「extra_model_paths.yaml - 副本.example」文件重命名,删除文件名中多余的字符串(「 - 副本.example」),得到 extra_model_paths.yaml 文件。
启动comfyUI如下图根据具体需求选择对应的脚本。
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。