Commit 8b44d7b ("Lint")

1 parent: a47de06

2 files changed (+2, -2 lines)


modules/models.py (+1, -1)
@@ -147,7 +147,7 @@ def huggingface_loader(model_name):
         params['force_safetensors'] = True
 
     if shared.args.use_eager_attention:
-        params['attn_implementation'] = 'eager'
+        params['attn_implementation'] = 'eager'
 
     config = AutoConfig.from_pretrained(path_to_model, trust_remote_code=shared.args.trust_remote_code)
 
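For context: this branch lets the loader request transformers' plain ("eager") attention implementation instead of fused SDPA/FlashAttention kernels. As extracted here, the removed and added lines render identically, consistent with a whitespace-only lint fix. Below is a minimal sketch of the pattern the hunk sits in, assuming params is later forwarded to AutoModelForCausalLM.from_pretrained as the webui's loader does; the model path and the args namespace are illustrative placeholders, not the repository's exact code.

from argparse import Namespace

from transformers import AutoConfig, AutoModelForCausalLM

# Illustrative stand-in for shared.args; only the flags used here are modeled.
args = Namespace(use_eager_attention=True, trust_remote_code=False)

path_to_model = 'path/to/model'  # placeholder model path
params = {}

if args.use_eager_attention:
    # 'eager' selects the unfused PyTorch attention path, which some models
    # and debugging workflows require.
    params['attn_implementation'] = 'eager'

config = AutoConfig.from_pretrained(path_to_model, trust_remote_code=args.trust_remote_code)
model = AutoModelForCausalLM.from_pretrained(path_to_model, config=config, **params)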

modules/ui_model_menu.py (+1, -1)
@@ -115,7 +115,7 @@ def create_ui():
     shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
     shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
     shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
-    shared.gradio['use_eager_attention'] = gr.Checkbox(label="use_eager_attention", value=shared.args.use_eager_attention, info='Set attn_implementation= eager while loading the model.')
+    shared.gradio['use_eager_attention'] = gr.Checkbox(label="use_eager_attention", value=shared.args.use_eager_attention, info='Set attn_implementation= eager while loading the model.')
     shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
     shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
     shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
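On the UI side, the flag is simply mirrored as a Gradio checkbox stored in the webui's component registry. A minimal self-contained sketch of that pattern, using a plain dict in place of shared.gradio and an assumed default value of False (the webui reads shared.args.use_eager_attention):

import gradio as gr

components = {}  # stand-in for the webui's shared.gradio registry

with gr.Blocks() as demo:
    # Checkbox mirroring the use_eager_attention launch flag; 'info' documents
    # the transformers kwarg the flag controls.
    components['use_eager_attention'] = gr.Checkbox(
        label="use_eager_attention",
        value=False,  # assumed default for illustration
        info='Set attn_implementation=eager while loading the model.',
    )

demo.launch()  # serves a minimal page containing just this checkbox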
