@@ -34,13 +34,13 @@ classifiers = [
   "Topic :: Scientific/Engineering :: Image Processing",
 ]
 dependencies = [
-  "accelerate",
+  "accelerate~=0.16",
   "albumentations",
   "click",
   "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
   "compel==0.1.7",
   "datasets",
-  "diffusers[torch]~=0.13",
+  "diffusers[torch]~=0.14",
   "dnspython==2.2.1",
   "einops",
   "eventlet",
@@ -73,7 +73,7 @@ dependencies = [
   "pytorch-lightning==1.7.7",
   "realesrgan",
   "requests==2.28.2",
-  "safetensors",
+  "safetensors~=0.3.0",
   "scikit-image>=0.19",
   "send2trash",
   "streamlit",
@@ -83,7 +83,7 @@ dependencies = [
   "torch-fidelity",
   "torchvision>=0.14.1",
   "torchmetrics",
-  "transformers~=4.25",
+  "transformers~=4.26",
   "uvicorn[standard]==0.20.0",
   "windows-curses; sys_platform=='win32'",
 ]
@@ -132,7 +132,7 @@ version = { attr = "invokeai.version.__version__" }
 [tool.setuptools.packages.find]
 "where" = ["."]
 "include" = [
-  "invokeai.assets.web*", "invokeai.version*",
+  "invokeai.assets.web*", "invokeai.version*",
   "invokeai.generator*", "invokeai.backend*",
   "invokeai.frontend*", "invokeai.frontend.web.dist*",
   "invokeai.configs*",
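The last hunk touches the setuptools package-discovery globs. At build time, [tool.setuptools.packages.find] drives setuptools' automatic discovery; a rough sketch of the equivalent call, assuming the directive's defaults (where namespace-aware discovery is enabled, so find_namespace_packages is the closer analogue):

    from setuptools import find_namespace_packages

    # Approximately what the build backend computes from the "include" globs above
    packages = find_namespace_packages(
        where=".",
        include=[
            "invokeai.assets.web*", "invokeai.version*",
            "invokeai.generator*", "invokeai.backend*",
            "invokeai.frontend*", "invokeai.frontend.web.dist*",
            "invokeai.configs*",
        ],
    )
    print(packages)  # list of dotted package names matching the globs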