Skip to content

Commit

Permalink
test load balancing server locally
Browse files Browse the repository at this point in the history
  • Loading branch information
wolverinn committed May 27, 2023
1 parent 7f7720d commit feb14f2
Show file tree
Hide file tree
Showing 5 changed files with 32 additions and 11 deletions.
13 changes: 12 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ In the main project directory:
1. SSH to the GPU server
2. clone or download the repository
3. cd to the main project directory(that contains `manage.py`)
4. run `sudo bash setup.sh` with options(checkout the `setup.sh` for options)
4. run `sudo bash setup.sh` with options (check out `setup.sh` for the available options; recommended order: follow the order in the file)
- if some downloads are slow, you can always download manually and upload to your server
5. restart apache: `sudo service apache2 restart`

Expand Down Expand Up @@ -126,5 +126,16 @@ models: list<string>
5. run `mv sd_multi/urls.py sd_multi/urls1.py && mv sd_multi/urls_lb.py sd_multi/urls.py`
6. modify `ip_list` variable with your own server ip+port in `simple/lb_views.py`
7. restart apache: `sudo service apache2 restart`
8. to test it, visit the `ip+port/multi_demo/` URL path

## Test the load-balancing server locally
If you don't want to deploy the load-balancing server but still want to test its functions, you can start the load-balancing server on your local computer.

1. clone or download the repository
2. requirements: python3, django, django-cors-headers, replicate
3. modify `ip_list` variable with your own GPU server ip+port in `simple/lb_views.py`
4. cd to the main project directory(that contains `manage.py`)
5. run `python manage.py runserver`
6. open the URL that shows up in the terminal and view the `/multi_demo/` path

Finally, you can call your http API(test it using postman).
1 change: 0 additions & 1 deletion sd_multi/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
urlpatterns = [
path('home/', views.homepage),
# web tti
path('multi_demo/', lb_views.demo_page),
path('txt2img/', views.txt2img),
path('img2img/', views.img2img),
path('progress/', views.progress),
Expand Down
1 change: 1 addition & 0 deletions sd_multi/urls_lb.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@

# load balancing
urlpatterns = [
path('multi_demo/', lb_views.demo_page),
path('txt2img/', lb_views.txt2img),
path('txt2img_fallback/', lb_views.txt2img_with_fallback),
path('img2img/', lb_views.img2img),
Expand Down
13 changes: 7 additions & 6 deletions setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,12 @@ elif [ $1 == "venv" ]; then
pip3 install torch==1.13.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
pip3 install torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
deactivate
elif [ $1 == "sd_model" ]; then
# download models
wget -P ./models/Stable-diffusion https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Stable-diffusion/chilloutmix_NiPrunedFp32Fix.safetensors
wget -P ./models/VAE https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/VAE/vae-ft-mse-840000-ema-pruned.ckpt
wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/koreanDollLikeness_v10.safetensors
wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/taiwanDollLikeness_v10.safetensors
elif [ $1 == "apache" ]; then
# configure apache
cd ..
Expand All @@ -42,12 +48,6 @@ elif [ $1 == "apache" ]; then
chgrp www-data www/
chmod g+w www/
service apache2 restart
elif [ $1 == "sd_model" ]; then
# download models
wget -P ./models/Stable-diffusion https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Stable-diffusion/chilloutmix_NiPrunedFp32Fix.safetensors
wget -P ./models/VAE https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/VAE/vae-ft-mse-840000-ema-pruned.ckpt
wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/koreanDollLikeness_v10.safetensors
wget -P ./models/Lora https://huggingface.co/Hardy01/chill_watcher/resolve/main/models/Lora/taiwanDollLikeness_v10.safetensors
elif [ $1 == "lb" ]; then
apt-get update
apt-get install python3-pip
Expand All @@ -57,6 +57,7 @@ elif [ $1 == "lb" ]; then
source ./venv/bin/activate
pip3 install django
pip3 install django-cors-headers
pip3 install replicate
python3 gen_http_conf.py
service apache2 restart
else
Expand Down
15 changes: 12 additions & 3 deletions simple/templates/multi_demo.html
Original file line number Diff line number Diff line change
Expand Up @@ -46,17 +46,24 @@
return;
}
let task_id = 1;
var timestamp = new Date().getTime();
var timestampInt = parseInt(timestamp / 1000);
// Compose a unique task id: page-load timestamp (seconds) + the per-page counter.
// Relies on the enclosing scope's `timestampInt` and `task_id`.
function gen_task_id() {
  return `${timestampInt}_${task_id}`;
}
function generate() {
disableBtn();

var prompt = $("#prompt").val();
var model_name = $("#model").val();
$.ajax({
url: "/txt2img/",
type: "POST",
dataType: "json",
data: JSON.stringify({
prompt: prompt,
task_id: task_id.toString(),
model: model_name,
task_id: gen_task_id(),
}),
success: function (response) {
console.log("generate success, resp: ", response);
Expand Down Expand Up @@ -97,7 +104,7 @@
dataType: "json",
data: JSON.stringify({
prompt: prompt,
task_id: task_id.toString(),
task_id: gen_task_id(),
}),
success: function (response) {
console.log("generate success, resp: ", response);
Expand Down Expand Up @@ -157,7 +164,7 @@
type: "POST",
dataType: "json",
data: JSON.stringify({
task_id: task_id.toString(),
task_id: gen_task_id(),
}),
success: function (response) {
console.log("generate success, resp: ", response);
Expand Down Expand Up @@ -256,6 +263,8 @@
<div class="page-element">
<h1>Multi-User SD demo</h1>
<div id="input-box">
<label for="model">choose model(you can leave it empty):</label>
<input id="model" type="text"></input>
<textarea
type="text"
id="prompt"
Expand Down

0 comments on commit feb14f2

Please sign in to comment.