当前位置:AIGC资讯 > AIGC > 正文

Datawhale X 魔搭 AI夏令营AIGC方向task3

ComfyUI 样例

下载并安装 ComfyUI

# #@title Environment Setup
# Colab-style configuration cell: the "#@param" markers render as form fields
# in Colab/notebook UIs; each boolean flag toggles one install/update step below.

from pathlib import Path

OPTIONS = {}
UPDATE_COMFY_UI = True  #@param {type:"boolean"}
INSTALL_COMFYUI_MANAGER = True  #@param {type:"boolean"}
INSTALL_KOLORS = True  #@param {type:"boolean"}
INSTALL_CUSTOM_NODES_DEPENDENCIES = True  #@param {type:"boolean"}
# Collect the flags into one dict so later cells read a single source of truth.
OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI
OPTIONS['INSTALL_COMFYUI_MANAGER'] = INSTALL_COMFYUI_MANAGER
OPTIONS['INSTALL_KOLORS'] = INSTALL_KOLORS
OPTIONS['INSTALL_CUSTOM_NODES_DEPENDENCIES'] = INSTALL_CUSTOM_NODES_DEPENDENCIES

# IPython shell-capture: "!pwd" returns the command's stdout as a list of
# lines, so current_dir[0] is the working directory string.
current_dir = !pwd
WORKSPACE = f"{current_dir[0]}/ComfyUI"

# Move to the workspace root; clone ComfyUI only when the target directory
# does not exist yet (the "[ ! -d ... ]" shell guard makes this idempotent).
%cd /mnt/workspace/

![ ! -d $WORKSPACE ] && echo -= Initial setup ComfyUI =- && git clone https://github.com/comfyanonymous/ComfyUI
%cd $WORKSPACE

if OPTIONS['UPDATE_COMFY_UI']:
  !echo "-= Updating ComfyUI =-"
  !git pull


# Optional custom nodes: each repo is cloned once if missing, then kept
# current via "git pull".
if OPTIONS['INSTALL_COMFYUI_MANAGER']:
  %cd custom_nodes
  ![ ! -d ComfyUI-Manager ] && echo -= Initial setup ComfyUI-Manager =- && git clone https://github.com/ltdrdata/ComfyUI-Manager
  %cd ComfyUI-Manager
  !git pull

if OPTIONS['INSTALL_KOLORS']:
  %cd ../
  ![ ! -d ComfyUI-KwaiKolorsWrapper ] && echo -= Initial setup KOLORS =- && git clone https://github.com/kijai/ComfyUI-KwaiKolorsWrapper.git
  %cd ComfyUI-KwaiKolorsWrapper
  !git pull

%cd $WORKSPACE

# Run ComfyUI-Manager's dependency installer only if the script is present
# (the "[ -f ... ]" guard keeps this safe when the manager was not installed).
if OPTIONS['INSTALL_CUSTOM_NODES_DEPENDENCIES']:
  !pwd
  !echo "-= Install custom nodes dependencies =-"
  ![ -f "custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py" ] && python "custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py"

# cloudflared is used later to expose the locally-running server via a
# public tunnel URL; install it from the mirrored .deb package.
!wget "https://modelscope.oss-cn-beijing.aliyuncs.com/resource/cloudflared-linux-amd64.deb"
!dpkg -i cloudflared-linux-amd64.deb

下载模型

#@markdown ###Download standard resources
# Download the Kolors pipeline components into ComfyUI's models/ layout.
# "wget -c" resumes partial downloads, so re-running the cell is safe.
# NOTE(review): this rebinds OPTIONS to an empty dict, discarding the flags
# set in the environment-setup cell above — presumably a leftover; confirm.

OPTIONS = {}

#@markdown **unet**

!wget -c "https://modelscope.cn/models/Kwai-Kolors/Kolors/resolve/master/unet/diffusion_pytorch_model.fp16.safetensors" -P ./models/diffusers/Kolors/unet/
!wget -c "https://modelscope.cn/models/Kwai-Kolors/Kolors/resolve/master/unet/config.json" -P ./models/diffusers/Kolors/unet/


#@markdown **encoder**
# Kolors uses ChatGLM3-6B as its text encoder; fetch it via the modelscope CLI.

!modelscope download --model=ZhipuAI/chatglm3-6b-base --local_dir ./models/diffusers/Kolors/text_encoder/

#@markdown **vae**

!wget -c "https://modelscope.cn/models/AI-ModelScope/sdxl-vae-fp16-fix/resolve/master/sdxl.vae.safetensors" -P ./models/vae/ #sdxl-vae-fp16-fix.safetensors

#@markdown **scheduler**
!wget -c "https://modelscope.cn/models/Kwai-Kolors/Kolors/resolve/master/scheduler/scheduler_config.json" -P ./models/diffusers/Kolors/scheduler/

#@markdown **modelindex**
!wget -c "https://modelscope.cn/models/Kwai-Kolors/Kolors/resolve/master/model_index.json" -P ./models/diffusers/Kolors/

安装 LoRA 节点

# Source code of a custom ComfyUI node ("LoadKolorsLoRA"), held as a string
# and later written to custom_nodes/ComfyUI-LoRA/__init__.py. The node
# renames LoRA state-dict keys from the training layout ("blocks.N...") to
# diffusers UNet attention paths, then injects a PEFT LoRA adapter into the
# pipeline's UNet. The string contents are runtime data (they become the
# node file verbatim), so they are intentionally left byte-identical here.
lora_node = """
import torch
from peft import LoraConfig, inject_adapter_in_model


class LoadKolorsLoRA:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "kolors_model": ("KOLORSMODEL", ),
                "lora_path": ("STRING", {"multiline": False, "default": "",}),
                "lora_alpha": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 4.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = ("KOLORSMODEL",)
    RETURN_NAMES = ("kolors_model",)
    FUNCTION = "add_lora"
    CATEGORY = "KwaiKolorsWrapper"

    def convert_state_dict(self, state_dict):
        prefix_rename_dict = {
            "blocks.7.transformer_blocks": "down_blocks.1.attentions.0.transformer_blocks",
            "blocks.10.transformer_blocks": "down_blocks.1.attentions.1.transformer_blocks",
            "blocks.15.transformer_blocks": "down_blocks.2.attentions.0.transformer_blocks",
            "blocks.18.transformer_blocks": "down_blocks.2.attentions.1.transformer_blocks",
            "blocks.21.transformer_blocks": "mid_block.attentions.0.transformer_blocks",
            "blocks.25.transformer_blocks": "up_blocks.0.attentions.0.transformer_blocks",
            "blocks.28.transformer_blocks": "up_blocks.0.attentions.1.transformer_blocks",
            "blocks.31.transformer_blocks": "up_blocks.0.attentions.2.transformer_blocks",
            "blocks.35.transformer_blocks": "up_blocks.1.attentions.0.transformer_blocks",
            "blocks.38.transformer_blocks": "up_blocks.1.attentions.1.transformer_blocks",
            "blocks.41.transformer_blocks": "up_blocks.1.attentions.2.transformer_blocks",
        }
        suffix_rename_dict = {
            ".to_out.lora_A.default.weight": ".to_out.0.lora_A.default.weight",
            ".to_out.lora_B.default.weight": ".to_out.0.lora_B.default.weight",
        }
        state_dict_ = {}
        for name, param in state_dict.items():
            for prefix in prefix_rename_dict:
                if name.startswith(prefix):
                    name = name.replace(prefix, prefix_rename_dict[prefix])
            for suffix in suffix_rename_dict:
                if name.endswith(suffix):
                    name = name.replace(suffix, suffix_rename_dict[suffix])
            state_dict_[name] = param
        lora_rank = state_dict_["up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.lora_A.default.weight"].shape[0]
        return state_dict_, lora_rank

    def load_lora(self, model, lora_rank, lora_alpha, state_dict):
        lora_config = LoraConfig(
            r=lora_rank,
            lora_alpha=lora_alpha,
            init_lora_weights="gaussian",
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
        )
        model = inject_adapter_in_model(lora_config, model)
        model.load_state_dict(state_dict, strict=False)
        return model

    def add_lora(self, kolors_model, lora_path, lora_alpha):
        state_dict = torch.load(lora_path, map_location="cpu")
        state_dict, lora_rank = self.convert_state_dict(state_dict)
        kolors_model["pipeline"].unet = self.load_lora(kolors_model["pipeline"].unet, lora_rank, lora_alpha, state_dict)
        return (kolors_model,)


NODE_CLASS_MAPPINGS = {
    "LoadKolorsLoRA": LoadKolorsLoRA,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "LoadKolorsLoRA": "Load Kolors LoRA",
}
__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
""".strip()

import os

# Install the custom node: ensure the ComfyUI-LoRA package directory exists,
# then write the node source (lora_node) into its __init__.py so ComfyUI
# picks it up on the next start.
node_dir = "/mnt/workspace/ComfyUI/custom_nodes/ComfyUI-LoRA"
os.makedirs(node_dir, exist_ok=True)
init_path = os.path.join(node_dir, "__init__.py")
with open(init_path, "w", encoding="utf-8") as node_file:
    node_file.write(lora_node)

启动 ComfyUI

启动后,通过代码输出的链接查看 UI 页面 点击右侧“Load”,加载“kolors_example.json”(不带 LoRA)或者 “kolors_with_lora_example.json”(带 LoRA) 加载 LoRA 时,请在“lora_path”处填入 LoRA 模型的路径,例如 /mnt/workspace/models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt

# Switch into the ComfyUI checkout and pull in everything the launch cell
# needs: subprocess/threading for the cloudflared tunnel watcher, socket for
# the port-ready probe.
%cd /mnt/workspace/ComfyUI
import subprocess
import threading
import time
import socket
import urllib.request

def iframe_thread(port):
  """Wait until a server is accepting connections on *port*, then expose it
  through a cloudflared tunnel and print the public URL.

  Polls 127.0.0.1:<port> every 0.5 s with connect_ex (returns 0 once the
  ComfyUI server is listening), then launches `cloudflared tunnel` and scans
  its stderr for the assigned trycloudflare.com URL.

  Args:
      port: TCP port the local ComfyUI server listens on (e.g. 8188).
  """
  while True:
      time.sleep(0.5)
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      try:
          result = sock.connect_ex(('127.0.0.1', port))
      finally:
          # Close on every iteration; the original leaked the socket on the
          # successful (result == 0) attempt because it broke before close().
          sock.close()
      if result == 0:
          break
  print("\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\n")

  # cloudflared reports the assigned public URL on stderr, not stdout.
  p = subprocess.Popen(["cloudflared", "tunnel", "--url", "http://127.0.0.1:{}".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  for line in p.stderr:
    l = line.decode()
    if "trycloudflare.com " in l:
      print("This is the URL to access ComfyUI:", l[l.find("http"):], end='')
    #print(l, end='')


# Start the tunnel watcher in a daemon thread (so it dies with the kernel),
# then run the ComfyUI server in the foreground; this cell blocks until the
# server exits.
threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()

!python main.py --dont-print-server

总结

## ComfyUI 样例指南
### 下载与安装
1. **环境搭建**:
- 设置一系列的安装参数(是否更新 ComfyUI、安装管理器、安装 Kolors 封装节点及自定义节点依赖等)。
- 确定当前目录并创建工作区,下载 ComfyUI 代码仓库并根据需求进行更新。
2. **依赖管理**:
- 若选项允许,会下载并更新 ComfyUI 及其管理器(ComfyUI-Manager)和 Kolors 封装节点(ComfyUI-KwaiKolorsWrapper)等必要依赖。
- 通过运行指定的脚本文件,安装自定义节点所需依赖。
3. **云工具配置**:
- 安装并使用 cloudflared,以确保能通过链接远程访问本地运行的服务。
### 资源下载
- 下载标准的模型资源(包括 unet 模型、text_encoder 文本编码器、VAE 变分自编码器、调度器配置、模型索引文件等),并存放在指定位置。
### 安装 LoRA 节点
- 定义一个 `LoadKolorsLoRA` 类,实现 LoRA(低秩适配器)加载的逻辑。
- 通过映射关系和参数适配,使该节点能够将 LoRA 应用于 Kolors 模型的指定层。
- 创建新的模块 `ComfyUI-LoRA`,并在其初始化文件中编写上述节点类的定义。
### 启动 ComfyUI
- 启动脚本在前台直接运行 main.py 启动服务,并通过子进程启动 cloudflared 隧道。
- 使用多线程监测服务端口是否启动成功,并使用 cloudflared 生成公网可访问的链接。
- 服务启动后,通过访问指定的链接可访问 ComfyUI 的界面,用户可以通过该界面加载和使用示例文件。
### 注意事项
- 在加载包含 LoRA 的文件时,需要提供 LoRA 模型文件的具体路径。
- 服务可能由于隧道工具(cloudflared)的故障而导致访问受限,若服务卡在这一步骤,建议检查网络连接及 Cloudflare 的相关服务。
此文档提供了一个详尽的步骤指导,用于配置并运行 ComfyUI,加载不同的资源文件和节点以构建自己的AI艺术创作流程。

更新时间 2024-09-23