diff --git a/README.md b/README.md
index 9cb8b09..6ff1465 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ In the main project directory:
 1. SSH to the GPU server
 2. clone or download the repository
 3. cd to the main project directory(that contains `manage.py`)
-4. run `sudo bash setup.sh` with options(checkout the `setup.sh` for options)
+4. run `sudo bash setup.sh` with options (check `setup.sh` for the options; recommended order: follow the order in the file)
   - if some downloads are slow, you can always download manually and upload to your server
 5. restart apache: `sudo service apache2 restart`
@@ -126,5 +126,16 @@ models: list
 5. run `mv sd_multi/urls.py sd_multi/urls1.py && mv sd_multi/urls_lb.py sd_multi/urls.py`
 6. modify `ip_list` variable with your own server ip+port in `simple/lb_views.py`
 7. restart apache: `sudo service apache2 restart`
+8. to test it, open the `ip+port/multi_demo/` url path
+
+## Test the load-balancing server locally
+If you don't want to deploy the load-balancing server but still want to test its functions, you can start it on your local computer.
+
+1. clone or download the repository
+2. requirements: python3, django, django-cors-headers, replicate
+3. modify `ip_list` variable with your own GPU server ip+port in `simple/lb_views.py`
+4. cd to the main project directory (that contains `manage.py`)
+5. run `python manage.py runserver`
+6. click the url that shows up in the terminal and open the `/multi_demo/` path
 
 Finally, you can call your http API(test it using postman).
\ No newline at end of file
diff --git a/sd_multi/urls.py b/sd_multi/urls.py
index a4511a6..9eb62f7 100644
--- a/sd_multi/urls.py
+++ b/sd_multi/urls.py
@@ -21,7 +21,6 @@
 urlpatterns = [
     path('home/', views.homepage),
     # web tti
-    path('multi_demo/', lb_views.demo_page),
     path('txt2img/', views.txt2img),
     path('img2img/', views.img2img),
     path('progress/', views.progress),
diff --git a/sd_multi/urls_lb.py b/sd_multi/urls_lb.py
index e805ead..0d5a2a4 100644
--- a/sd_multi/urls_lb.py
+++ b/sd_multi/urls_lb.py
@@ -29,6 +29,7 @@
 # load balancing
 urlpatterns = [
+    path('multi_demo/', lb_views.demo_page),
     path('txt2img/', lb_views.txt2img),
     path('txt2img_fallback/', lb_views.txt2img_with_fallback),
     path('img2img/', lb_views.img2img),
diff --git a/setup.sh b/setup.sh
index ea417ee..53bec92 100644
--- a/setup.sh
+++ b/setup.sh
@@ -25,6 +25,12 @@ elif [ $1 == "venv" ]; then
     pip3 install torch==1.13.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
     pip3 install torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
     deactivate
+elif [ $1 == "sd_model" ]; then
+    # download models
+    wget -P ./models/Stable-diffusion https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Stable-diffusion/chilloutmix_NiPrunedFp32Fix.safetensors
+    wget -P ./models/VAE https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/VAE/vae-ft-mse-840000-ema-pruned.ckpt
+    wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/koreanDollLikeness_v10.safetensors
+    wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/taiwanDollLikeness_v10.safetensors
 elif [ $1 == "apache" ]; then
     # configure apache
     cd ..
@@ -42,12 +48,6 @@ elif [ $1 == "apache" ]; then
     chgrp www-data www/
     chmod g+w www/
     service apache2 restart
-elif [ $1 == "sd_model" ]; then
-    # download models
-    wget -P ./models/Stable-diffusion https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Stable-diffusion/chilloutmix_NiPrunedFp32Fix.safetensors
-    wget -P ./models/VAE https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/VAE/vae-ft-mse-840000-ema-pruned.ckpt
-    wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/koreanDollLikeness_v10.safetensors
-    wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/taiwanDollLikeness_v10.safetensors
 elif [ $1 == "lb" ]; then
     apt-get update
     apt-get install python3-pip
@@ -57,6 +57,7 @@ elif [ $1 == "lb" ]; then
     source ./venv/bin/activate
     pip3 install django
     pip3 install django-cors-headers
+    pip3 install replicate
     python3 gen_http_conf.py
     service apache2 restart
 else
diff --git a/simple/templates/multi_demo.html b/simple/templates/multi_demo.html
index 3cc3080..c8043ee 100644
--- a/simple/templates/multi_demo.html
+++ b/simple/templates/multi_demo.html
@@ -46,17 +46,24 @@
             return;
         }
         let task_id = 1;
+        var timestamp = new Date().getTime();
+        var timestampInt = parseInt(timestamp / 1000);
+        function gen_task_id() {
+            return timestampInt.toString() + "_" + task_id.toString()
+        };
         function generate() {
             disableBtn();
             var prompt = $("#prompt").val();
+            var model_name = $("#model").val();
             $.ajax({
                 url: "/txt2img/",
                 type: "POST",
                 dataType: "json",
                 data: JSON.stringify({
                     prompt: prompt,
-                    task_id: task_id.toString(),
+                    model: model_name,
+                    task_id: gen_task_id(),
                 }),
                 success: function (response) {
                     console.log("generate success, resp: ", response);
@@ -97,7 +104,7 @@
                 dataType: "json",
                 data: JSON.stringify({
                     prompt: prompt,
-                    task_id: task_id.toString(),
+                    task_id: gen_task_id(),
                 }),
                 success: function (response) {
                     console.log("generate success, resp: ", response);
@@ -157,7 +164,7 @@
                 type: "POST",
                 dataType: "json",
                 data: JSON.stringify({
-                    task_id: task_id.toString(),
+                    task_id: gen_task_id(),
                 }),
                 success: function (response) {
                     console.log("generate success, resp: ", response);
@@ -256,6 +263,8 @@

             Multi-User SD demo
 
+            [added markup lines not preserved in this excerpt]
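
For the README's "call your http API" step, here is a minimal sketch of a smoke test against the `/txt2img/` endpoint. The JSON field names (`prompt`, `model`, `task_id` in the `timestamp_counter` form) mirror what the updated demo page sends; the host/port, the example model name, and the response shape are assumptions, so adjust them to your deployment.

```python
# Minimal sketch of calling the txt2img HTTP API.
# Assumptions: host/port, model name, and response shape; field names follow the demo page's payload.
import json
import time
import urllib.request

BASE_URL = "http://127.0.0.1:8000"  # e.g. the local `python manage.py runserver` address

payload = {
    "prompt": "a photo of a cat",                # your prompt
    "model": "chilloutmix_NiPrunedFp32Fix",      # assumed model name; match a model you downloaded
    "task_id": f"{int(time.time())}_1",          # same "timestamp_counter" scheme as gen_task_id()
}

req = urllib.request.Request(
    f"{BASE_URL}/txt2img/",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read()[:300])  # inspect the raw response body
```

The same request can be sent from Postman. The third hunk of the demo page suggests that the progress poll posts only a `task_id`, so reusing the same id against `/progress/` (registered in `sd_multi/urls.py`) should let you check generation progress.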