Commit Files

This commit is contained in:
Render Node 2025-01-30 10:53:18 +00:00
parent 4deafd8224
commit d73604e85e
4 changed files with 389 additions and 20 deletions

2
.gitignore vendored
View File

@@ -23,3 +23,5 @@ comfyui.prev.log
*.log
web_custom_versions/
.DS_Store
login/PASSWORD
chun_test.py

View File

@@ -1,20 +1,216 @@
torch
torchsde
torchvision
torchaudio
einops
transformers>=4.28.1
tokenizers>=0.13.3
sentencepiece
safetensors>=0.4.2
aiohttp
pyyaml
Pillow
scipy
tqdm
psutil
#non essential dependencies:
kornia>=0.7.1
spandrel
soundfile
absl-py==2.1.0
accelerate==1.2.1
addict==2.4.0
aiohappyeyeballs==2.4.0
aiohttp==3.10.5
aiohttp-session==2.12.0
aiosignal==1.3.1
albucore==0.0.20
albumentations==1.4.21
altair==5.4.1
annotated-types==0.7.0
antlr4-python3-runtime==4.9.3
anyio==4.6.2.post1
async-timeout==4.0.3
attrs==24.2.0
bcrypt==4.2.0
bitsandbytes==0.44.1
cachetools==5.5.0
came-pytorch==0.1.3
certifi==2024.7.4
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.3.2
click==8.1.7
cmake==3.30.2
color-matcher==0.5.0
colorama==0.4.6
coloredlogs==15.0.1
colorlog==6.9.0
contourpy==1.2.1
cryptography==43.0.0
cssselect2==0.7.0
cstr @ git+https://github.com/WASasquatch/cstr@0520c29a18a7a869a6e5983861d6f7a4c86f8e9b
cycler==0.12.1
datasets==3.1.0
ddt==1.7.2
decord==0.6.0
Deprecated==1.2.14
diffusers==0.31.0
dill==0.3.8
diskcache==5.6.3
docutils==0.21.2
einops==0.8.0
eval_type_backport==0.2.0
exceptiongroup==1.2.2
fairscale==0.4.13
ffmpy @ git+https://github.com/WASasquatch/ffmpy.git@f000737698b387ffaeab7cd871b0e9185811230d
filelock==3.13.1
flatbuffers==24.3.25
fonttools==4.53.1
frozenlist==1.4.1
fsspec==2024.2.0
ftfy==6.2.3
fvcore==0.1.5.post20221221
gevent==24.10.1
gitdb==4.0.11
GitPython==3.1.43
greenlet==3.1.1
h11==0.14.0
httpcore==1.0.6
httpx==0.27.2
huggingface-hub==0.24.6
humanfriendly==10.0
idna==3.7
imageio==2.35.1
imageio-ffmpeg==0.5.1
imagesize==1.4.1
img2texture @ git+https://github.com/WASasquatch/img2texture.git@a546609a915caf1dcb84a15bf44b5b639ad924e6
importlib_metadata==8.4.0
intel-openmp==2021.4.0
iopath==0.1.10
jax==0.4.35
jaxlib==0.4.35
Jinja2==3.1.3
joblib==1.4.2
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
kiwisolver==1.4.5
kornia==0.7.3
kornia_rs==0.1.5
lazy_loader==0.4
lion-pytorch==0.2.2
llama_cpp_python @ https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.89+cpuavx2-cp310-cp310-win_amd64.whl
llama_cpp_python_cuda @ https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.89+cu121-cp310-cp310-win_amd64.whl
llvmlite==0.43.0
lxml==5.3.0
manifold3d==2.5.1
mapbox_earcut==1.0.2
markdown-it-py==3.0.0
MarkupSafe==2.1.5
matplotlib==3.8.0
matrix-client==0.4.0
mdurl==0.1.2
mediapipe==0.10.18
mkl==2021.4.0
ml_dtypes==0.5.0
mpmath==1.3.0
mss==9.0.1
multidict==6.0.5
multiprocess==0.70.16
narwhals==1.5.5
networkx==3.2.1
numba==0.60.0
numpy==1.26.3
omegaconf==2.3.0
onnxruntime==1.19.0
onnxruntime-gpu==1.19.2
opencv-contrib-python==4.10.0.84
opencv-python==4.10.0.84
opencv-python-headless==4.10.0.84
opt_einsum==3.4.0
packaging==24.1
pandas==2.2.2
pathlib==1.0.1
peft==0.12.0
piexif==1.1.3
pilgram==1.2.1
pillow==10.4.0
platformdirs==4.2.2
pooch==1.8.2
portalocker==2.10.1
prodigyopt==1.0
protobuf==4.25.5
psutil==6.0.0
py-cpuinfo==9.0.0
pyarrow==18.0.0
pycollada==0.8
pycparser==2.22
pycryptodome==3.20.0
pydantic==2.9.2
pydantic_core==2.23.4
PyGithub==2.3.0
Pygments==2.18.0
PyJWT==2.9.0
PyMatting==1.1.12
PyNaCl==1.5.0
pyparsing==3.1.2
pyreadline3==3.4.1
python-dateutil==2.9.0.post0
pytz==2024.1
PyWavelets==1.8.0
pywin32==308
PyYAML==6.0.2
qrcode==8.0
referencing==0.35.1
regex==2024.7.24
rembg==2.0.58
reportlab==4.2.5
requests==2.32.3
requirements-parser==0.11.0
rich==13.7.1
rich-argparse==1.5.2
rpds-py==0.20.0
Rtree==1.3.0
safetensors==0.4.4
schedulefree==1.2.7
scikit-image==0.24.0
scikit-learn==1.5.1
scipy==1.14.0
seaborn==0.13.2
segment-anything==1.0
sentencepiece==0.2.0
shapely==2.0.6
shellingham==1.5.4
simsimd==5.9.11
six==1.16.0
smmap==5.0.1
sniffio==1.3.1
sounddevice==0.5.1
soundfile==0.12.1
spandrel==0.3.4
stringzilla==3.10.7
svg.path==6.3
svglib==1.5.1
sympy==1.13.1
tabulate==0.9.0
tbb==2021.11.0
termcolor==2.5.0
threadpoolctl==3.5.0
tifffile==2024.8.10
timm==1.0.8
tinycss2==1.4.0
tokenizers==0.21.0
tomesd==0.1.3
toml==0.10.2
tomli==2.0.2
torch==2.5.0+cu121
torchaudio==2.5.0+cu121
torchsde==0.2.6
torchvision==0.20.0+cu121
tqdm==4.66.5
trampoline==0.1.2
transformers==4.47.1
trimesh==4.5.1
typer==0.12.4
types-setuptools==72.2.0.20240821
typing_extensions==4.12.2
tzdata==2024.1
ultralytics==8.2.79
ultralytics-thop==2.0.5
urllib3==1.26.19
vhacdx==0.0.8.post1
voluptuous==0.15.2
wcwidth==0.2.13
webcolors==24.8.0
webencodings==0.5.1
websocket-client==1.8.0
wrapt==1.16.0
xatlas==0.0.9
xxhash==3.5.0
yacs==0.1.8
yapf==0.40.2
yarl==1.9.4
zipp==3.20.0
zope.event==5.0
zope.interface==7.1.0

View File

@@ -0,0 +1,170 @@
#This is an example that uses the websockets api to know when a prompt execution is done
#Once the prompt execution is done it downloads the images using the /history endpoint
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse
# Host:port of the ComfyUI server this example talks to.
server_address = "192.168.0.210:8188"
# Random id identifying this client to the server; used to match websocket
# "executing" messages to the prompt we queued.
client_id = str(uuid.uuid4())
# TOKEN is stored in the file `./PASSWORD`, or you can obtain it from the command line window when ComfyUI starts.
# It will appear like this:
# NOTE(review): this is a credential hash committed directly into the repo --
# consider reading it from the PASSWORD file or an environment variable instead.
TOKEN = "$2b$12$3VB7LtBjyEgvc.ATl9XagO2Yh9Ox0.0Nci0khTA2mv4UmkzoNyzn."
# If you get errors like: HTTP Error 400: Bad Request, please check the server's console for more detailed error message.
# Sometimes it's related to the model file's filename.
def queue_prompt(prompt):
    """POST *prompt* to the server's /prompt endpoint and return the parsed JSON reply.

    The reply includes the ``prompt_id`` the server assigned to this execution.
    """
    payload = {"prompt": prompt, "client_id": client_id}
    encoded = json.dumps(payload).encode('utf-8')
    endpoint = "http://{}/prompt?token={}".format(server_address, TOKEN)
    request = urllib.request.Request(endpoint, data=encoded)
    return json.loads(urllib.request.urlopen(request).read())
def get_image(filename, subfolder, folder_type):
    """Download one output image from the /view endpoint and return its raw bytes."""
    query = urllib.parse.urlencode({
        "filename": filename,
        "subfolder": subfolder,
        "type": folder_type,
    })
    url = "http://{}/view?{}&token={}".format(server_address, query, TOKEN)
    with urllib.request.urlopen(url) as response:
        return response.read()
def get_history(prompt_id):
    """Fetch and decode the /history record for *prompt_id* (a dict keyed by prompt id)."""
    url = "http://{}/history/{}?token={}".format(server_address, prompt_id, TOKEN)
    with urllib.request.urlopen(url) as response:
        body = response.read()
    return json.loads(body)
def get_images(ws, prompt):
    """Queue *prompt*, wait on websocket *ws* until execution finishes, then
    download every output image.

    Returns a dict mapping node id -> list of raw image bytes.

    Fix: the original iterated ``history['outputs']`` twice in a nested loop
    (``for o in history['outputs']: for node_id in history['outputs']:``),
    which re-downloaded every image once per output node. A single loop over
    the node ids is sufficient.
    """
    prompt_id = queue_prompt(prompt)['prompt_id']
    output_images = {}
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                # node == None with our prompt_id signals execution is done
                if data['node'] is None and data['prompt_id'] == prompt_id:
                    break
        else:
            continue  # previews are binary data; ignore them

    history = get_history(prompt_id)[prompt_id]
    for node_id in history['outputs']:
        node_output = history['outputs'][node_id]
        if 'images' in node_output:
            output_images[node_id] = [
                get_image(image['filename'], image['subfolder'], image['type'])
                for image in node_output['images']
            ]
    return output_images
prompt_text = """
{
"3": {
"class_type": "KSampler",
"inputs": {
"cfg": 8,
"denoise": 1,
"latent_image": [
"5",
0
],
"model": [
"4",
0
],
"negative": [
"7",
0
],
"positive": [
"6",
0
],
"sampler_name": "euler",
"scheduler": "normal",
"seed": 8566257,
"steps": 20
}
},
"4": {
"class_type": "CheckpointLoaderSimple",
"inputs": {
"ckpt_name": "realvisxlV40_v40LightningBakedvae.safetensors"
}
},
"5": {
"class_type": "EmptyLatentImage",
"inputs": {
"batch_size": 1,
"height": 512,
"width": 512
}
},
"6": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "masterpiece best quality noodles"
}
},
"7": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "bad hands"
}
},
"8": {
"class_type": "VAEDecode",
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
}
},
"9": {
"class_type": "SaveImage",
"inputs": {
"filename_prefix": "ComfyUI",
"output_dir": "None",
"images": [
"8",
0
]
}
}
}
"""
# Driver: patch the workflow, queue it, and display the resulting images.
# Fixes: the original comment claimed the display code was "commented out"
# (it was not), and `from PIL import Image` / `import io` ran inside the
# inner loop on every image -- hoisted out of the loop here.
import io
from PIL import Image

prompt = json.loads(prompt_text)
# set the text prompt for our positive CLIPTextEncode
prompt["6"]["inputs"]["text"] = "masterpiece best quality apple"
# set the seed for our KSampler node
prompt["3"]["inputs"]["seed"] = 5

ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}&token={}".format(server_address, client_id, TOKEN))
images = get_images(ws, prompt)

# Open each downloaded image in the default system viewer.
for node_id in images:
    for image_data in images[node_id]:
        image = Image.open(io.BytesIO(image_data))
        image.show()