DemiPoto committed on
Commit
9027be6
1 Parent(s): ab941e4

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +347 -75
app.py CHANGED
@@ -1,14 +1,25 @@
1
  import os
2
  import gradio as gr
3
  from random import randint
4
- from all_models import models
 
5
  from datetime import datetime
6
-
 
 
7
  from threading import RLock
8
  lock = RLock()
 
 
9
 
10
  now2 = 0
11
- nb_models=24
 
 
 
 
 
 
12
 
13
  def split_models(models,nb_models):
14
  models_temp=[]
@@ -38,7 +49,7 @@ def split_models_axb(models,a,b):
38
  models_temp=[]
39
  if len(models_temp)>1:
40
  models_lis_temp.append(models_temp)
41
- return models_lis_temp , a*b
42
 
43
  def split_models_8x3(models,nb_models):
44
  models_temp=[]
@@ -55,18 +66,31 @@ def split_models_8x3(models,nb_models):
55
  models_lis_temp.append(models_temp+models_temp+models_temp)
56
  return models_lis_temp
57
 
58
- """models_test=split_models_x3(models,nb_models)"""
59
- """models_test=split_models(models,nb_models)"""
60
- models_test , nb_models =split_models_axb(models,2,20)
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
  def get_current_time():
63
  now = datetime.now()
64
  now2 = now
65
  current_time = now2.strftime("%Y-%m-%d %H:%M:%S")
 
66
  ki = f'{kii} {current_time}'
67
  return ki
68
-
69
- def load_fn(models):
70
  global models_load
71
  global num_models
72
  global default_models
@@ -82,10 +106,35 @@ def load_fn(models):
82
  m = gr.load(f'models/{model}')
83
  except Exception as error:
84
  m = gr.Interface(lambda txt: None, ['text'], ['image'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  models_load.update({model: m})
86
 
87
 
88
  """models = models_test[1]"""
 
89
  load_fn(models)
90
  """models = {}
91
  load_fn(models)"""
@@ -101,42 +150,124 @@ def extend_choices_b(choices):
101
 
102
  def update_imgbox(choices):
103
  choices_plus = extend_choices(choices)
104
- return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
105
 
106
  def choice_group_a(group_model_choice):
107
- for m in models_test:
108
- if group_model_choice==m[1]:
109
- choice=m
110
- print(choice)
111
- return choice
112
 
113
  def choice_group_b(group_model_choice):
114
- choice=choice_group_a(group_model_choice)
115
- choice = extend_choices(choice)
116
  """return [gr.Image(label=m, min_width=170, height=170) for m in choice]"""
117
- return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choice]
118
 
119
  def choice_group_c(group_model_choice):
120
- choice=choice_group_a(group_model_choice)
121
- choice = extend_choices(choice)
122
- return [gr.Textbox(m, visible=False) for m in choice]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
- def choice_group_d(var_Test):
125
- (gen_button,stop_button,output,current_models)=var_Test
126
- for m, o in zip(current_models, output):
127
- gen_event = gen_button.click(gen_fn, [m, txt_input], o)
128
- stop_button.click(lambda s: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
129
- return gen_event
 
 
 
130
 
131
  def test_pass(test):
132
  if test==os.getenv('p'):
133
  print("ok")
134
- return gr.Dropdown(label="test Model", show_label=False, choices=list(models_test) , allow_custom_value=True)
135
  else:
136
  print("nop")
137
- return gr.Dropdown(label="test Model", show_label=False, choices=list([]) , allow_custom_value=True)
138
 
139
- def gen_fn(model_str, prompt):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  if model_str == 'NA':
141
  return None
142
  noise = str(randint(0, 9999))
@@ -144,75 +275,215 @@ def gen_fn(model_str, prompt):
144
  m=models_load[model_str](f'{prompt} {noise}')
145
  except Exception as error :
146
  print("error : " + model_str)
 
147
  m=False
148
 
149
  return m
150
 
 
151
  def add_gallery(image, model_str, gallery):
152
  if gallery is None: gallery = []
153
- with lock:
154
- if image is not None: gallery.insert(0, (image, model_str))
155
  return gallery
156
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  def make_me():
158
  # with gr.Tab('The Dream'):
159
  with gr.Row():
160
  #txt_input = gr.Textbox(lines=3, width=300, max_height=100)
161
- txt_input = gr.Textbox(label='Your prompt:', lines=3, width=300, max_height=100)
162
-
163
- gen_button = gr.Button('Generate images', width=150, height=30)
164
- stop_button = gr.Button('Stop', variant='secondary', interactive=False, width=150, height=30)
165
- gen_button.click(lambda s: gr.update(interactive=True), None, stop_button)
166
- gr.HTML("""
167
- <div style="text-align: center; max-width: 100%; margin: 0 auto;">
168
- <body>
169
- </body>
170
- </div>
171
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  with gr.Row():
173
  """output = [gr.Image(label=m, min_width=170, height=170) for m in default_models]
174
  current_models = [gr.Textbox(m, visible=False) for m in default_models]"""
175
  """choices=[models_test[0][0]]"""
176
- choices=models_test[0]
177
  """output = [gr.Image(label=m, min_width=170, height=170) for m in choices]
178
  current_models = [gr.Textbox(m, visible=False) for m in choices]"""
179
- output = update_imgbox([choices[0]])
180
- current_models = extend_choices_b([choices[0]])
 
 
 
181
 
182
- #with gr.Row():
183
- # gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
184
- # interactive=False, show_share_button=True, container=True, format="png",
185
- # preview=True, object_fit="cover", columns=2, rows=2)
186
 
187
- for m, o in zip(current_models, output):
188
- gen_event = gen_button.click(gen_fn, [m, txt_input], o)
189
- stop_button.click(lambda s: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
190
- #o.change(add_gallery, [o, m, gallery], [gallery])
191
- """with gr.Accordion('Model selection'):
192
- model_choice = gr.CheckboxGroup(models, label=f' {num_models} different models selected', value=default_models, multiselect=True, max_choices=num_models, interactive=True, filterable=False)
193
- model_choice.change(update_imgbox, (gen_button,stop_button,group_model_choice), output)
194
- model_choice.change(extend_choices, model_choice, current_models)
195
- """
196
-
197
- with gr.Accordion("test", open=True):
198
- """group_model_choice = gr.Dropdown(label="test Model", show_label=False, choices=list(models_test) , allow_custom_value=True)"""
199
- group_model_choice = gr.Dropdown(label="test Model", show_label=False, choices=list([]) , allow_custom_value=True)
200
- group_model_choice.change(choice_group_b,group_model_choice,output)
201
- group_model_choice.change(choice_group_c,group_model_choice,current_models)
202
- """group_model_choice.change(choice_group_d,(gen_button,stop_button,output,current_models),gen_event)"""
203
  with gr.Row():
204
- txt_input_p = gr.Textbox(label='test', lines=1, width=300, max_height=100)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
 
206
- test_button = gr.Button('test', width=30, height=10)
207
- test_button.click(test_pass,txt_input_p,group_model_choice)
208
  with gr.Row():
 
 
209
  gr.HTML("""
210
  <div class="footer">
211
  <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier!
212
  </p>
213
  """)
214
-
215
-
216
 
217
  js_code = """
218
 
@@ -222,11 +493,12 @@ js_code = """
222
  """
223
 
224
 
225
- with gr.Blocks(css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo:
226
  gr.Markdown("<script>" + js_code + "</script>")
227
  make_me()
228
 
229
 
230
-
231
- demo.queue(concurrency_count=999)
232
- demo.launch()
 
 
1
  import os
2
  import gradio as gr
3
  from random import randint
4
+ from operator import itemgetter
5
+ from all_models import tags_plus_models,models,models_plus_tags
6
  from datetime import datetime
7
+ from externalmod import gr_Interface_load
8
+ import asyncio
9
+ import os
10
  from threading import RLock
11
  lock = RLock()
12
# Module-level configuration constants.

# Hugging Face API token for private/gated models. os.environ.get already
# returns None when the variable is unset, so the original
# `x if x else None` conditional was redundant. If private or gated models
# aren't used, setting the ENV variable is unnecessary.
HF_TOKEN = os.environ.get("HF_TOKEN")

now2 = 0  # scratch value reused by get_current_time()

inference_timeout = 300   # seconds to wait for a single model inference
MAX_SEED = 2**32 - 1      # upper bound of the seed slider (32-bit RNG)


nb_rep = 2                        # times each distinct model is repeated per group
nb_mod_dif = 20                   # distinct models per group
nb_models = nb_mod_dif * nb_rep   # total image slots per group
23
 
24
  def split_models(models,nb_models):
25
  models_temp=[]
 
49
  models_temp=[]
50
  if len(models_temp)>1:
51
  models_lis_temp.append(models_temp)
52
+ return models_lis_temp
53
 
54
  def split_models_8x3(models,nb_models):
55
  models_temp=[]
 
66
  models_lis_temp.append(models_temp+models_temp+models_temp)
67
  return models_lis_temp
68
 
69
def construct_list_models(tags_plus_models, nb_rep, nb_mod_dif):
    """Build the nested tag/group structure used by the two dropdowns.

    Each entry of the result is [tag_label, groups] where tag_label is
    "tag (count)" and groups is a list of [group_label, model_list] pairs
    produced by split_models_axb.

    Improvements over the original: `elem[-1]` instead of
    `elem[len(elem)-1]`, enumerate instead of a manual counter, and the
    unused pre-initialized temp list removed.
    """
    output = []
    for tag_plus_models in tags_plus_models:
        # tag_plus_models is assumed to be (tag, count, model_list) — TODO confirm
        groups = split_models_axb(tag_plus_models[2], nb_rep, nb_mod_dif)
        labelled = []
        for i, elem in enumerate(groups):
            # Group label shows the tag, group index and first/last model names.
            labelled.append([tag_plus_models[0] + "_" + str(i) + " : " + elem[0] + " - " + elem[-1], elem])
        output.append([tag_plus_models[0] + " (" + str(tag_plus_models[1]) + ")", labelled])
    return output
81
+
82
# Grouped model index used by the tag/group dropdowns. The original also
# pre-assigned `models_test = []`, which was immediately overwritten.
models_test = construct_list_models(tags_plus_models, nb_rep, nb_mod_dif)
84
 
85
def get_current_time():
    """Return the current local time formatted as ' YYYY-MM-DD HH:MM:SS'.

    The leading space is kept for backward compatibility: the timestamp is
    prefixed by an (empty) `kii` label, as in the original. The original's
    pointless `now2 = now` alias (which shadowed the module-level `now2`)
    is removed.
    """
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    kii = ""  # label placeholder, historically unused
    return f'{kii} {current_time}'
92
+
93
+ def load_fn_original(models):
94
  global models_load
95
  global num_models
96
  global default_models
 
106
  m = gr.load(f'models/{model}')
107
  except Exception as error:
108
  m = gr.Interface(lambda txt: None, ['text'], ['image'])
109
+ print(error)
110
+ models_load.update({model: m})
111
+
112
def load_fn(models):
    """Load every model into the global registry used by the generators.

    Sets globals: models_load (name -> gradio interface), num_models,
    default_models. A model that fails to load gets a stub Interface that
    returns None, so one bad model doesn't break the whole grid.

    Idiom fixes: enumerate instead of a manual counter, `in models_load`
    instead of `in models_load.keys()`, direct item assignment instead of
    `.update({...})`.
    """
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    if num_models != 0:
        default_models = models[:num_models]
    else:
        default_models = {}
    for i, model in enumerate(models, start=1):
        # Progress marker: loading hundreds of models is slow.
        if i % 50 == 0:
            print("\n\n\n-------" + str(i) + '/' + str(len(models)) + "-------\n\n\n")
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                # Fall back to a no-op interface so the UI keeps working.
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
                print(error)
            models_load[model] = m
134
 
135
 
136
  """models = models_test[1]"""
137
+ #load_fn_original
138
  load_fn(models)
139
  """models = {}
140
  load_fn(models)"""
 
150
 
151
def update_imgbox(choices):
    """Rebuild the image widgets for a choice list padded by extend_choices;
    slots named 'NA' are hidden."""
    padded = extend_choices(choices)
    boxes = []
    for name in padded:
        boxes.append(gr.Image(None, label=name, interactive=False, visible=(name != 'NA')))
    return boxes
154
 
155
def choice_group_a(group_model_choice):
    """Identity hook: shared entry point for the group-choice handlers."""
    selection = group_model_choice
    return selection
 
 
 
 
157
 
158
def choice_group_b(group_model_choice):
    """Return refreshed gr.Image widgets for the chosen model group
    (padded by extend_choices; 'NA' slots hidden)."""
    extended = extend_choices(choice_group_a(group_model_choice))
    """return [gr.Image(label=m, min_width=170, height=170) for m in choice]"""
    return [gr.Image(None, label=m, interactive=False, visible=(m != 'NA')) for m in extended]
163
 
164
def choice_group_c(group_model_choice):
    """Return hidden gr.Textbox widgets naming each model of the chosen group."""
    extended = extend_choices(choice_group_a(group_model_choice))
    return [gr.Textbox(m, visible=False) for m in extended]
168
+
169
+
170
def cutStrg(longStrg, start, end):
    """Return the substring longStrg[start:end].

    Replaces the original quadratic character-by-character loop with a
    slice; also tolerant of end > len(longStrg), where the loop would
    have raised IndexError.
    """
    return longStrg[start:end]
175
+
176
def aff_models_perso(txt_list_perso,nb_models=nb_models,models=models):
    """Extract double-quoted model names from free-form text.

    Scans txt_list_perso for "quoted" substrings, keeps those that are
    known model names, and stops after nb_models matches or when no more
    quotes are found. Returns a (possibly empty) list of model names.
    Defaults bind the module-level nb_models/models at definition time.
    """
    list_perso=[]
    t1=True  # scan flag: set False to leave the while loop
    start=txt_list_perso.find('\"')
    if start!=-1:
        while t1:
            start+=1  # step past the opening quote
            end=txt_list_perso.find('\"',start)
            if end != -1:
                txtTemp=cutStrg(txt_list_perso,start,end)
                # Keep only names present in the known model list.
                if txtTemp in models:
                    list_perso.append(cutStrg(txt_list_perso,start,end))
            else :
                t1=False
            # Jump to the next opening quote after the closing one.
            # NOTE(review): when end == -1 this searches from index 0 again,
            # but t1 is already False so the loop still terminates.
            start=txt_list_perso.find('\"',end+1)
            if start==-1:
                t1=False
            if len(list_perso)>=nb_models:
                t1=False
    return list_perso
196
 
197
def aff_models_perso_b(txt_list_perso):
    """Parse a user-pasted model list and build the matching image widgets."""
    parsed = aff_models_perso(txt_list_perso)
    return choice_group_b(parsed)
199
+
200
def aff_models_perso_c(txt_list_perso):
    """Parse a user-pasted model list and build the matching hidden textboxes."""
    parsed = aff_models_perso(txt_list_perso)
    return choice_group_c(parsed)
202
+
203
+
204
def tag_choice(group_tag_choice):
    """Build the second-level dropdown listing the model groups of a tag."""
    options = list(group_tag_choice)
    return gr.Dropdown(
        label="List of Models with the chosen Tag",
        show_label=True,
        choices=options,
        interactive=True,
        filterable=False,
    )
206
 
207
def test_pass(test):
    """Gate the tag dropdown behind the password stored in env var 'p':
    a correct password fills it with models_test, otherwise it is emptied."""
    if test == os.getenv('p'):
        print("ok")
        options = list(models_test)
    else:
        print("nop")
        options = []
    return gr.Dropdown(label="Lists Tags", show_label=True, choices=options, interactive=True)
214
 
215
def test_pass_aff(test):
    """Show or hide the tools accordion depending on the 'p' password.

    The two original branches differed only in `visible`, so they collapse
    into a single return. NOTE(review): plain string comparison against an
    env-var password — not constant-time; acceptable for a hobby space.
    """
    return gr.Accordion(open=True, visible=(test == os.getenv('p')))
220
+
221
+
222
# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Run one model inference in a worker thread, bounded by `timeout`.

    Returns the absolute path of the saved PNG on success, or None on
    timeout/error. Parameters left at 0 / None are not forwarded so the
    model's own defaults apply.
    """
    from pathlib import Path
    kwargs = {}
    # Only forward meaningfully-set parameters (height/width below 256 are
    # treated as "unset" here).
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
    noise = ""
    if seed >= 0: kwargs["seed"] = seed
    else:
        # No fixed seed: pad the prompt with a random amount of whitespace,
        # presumably to defeat result caching on the inference API — TODO confirm.
        rand = randint(1, 500)
        for i in range(rand):
            noise += " "
    # Run the blocking HF call off the event loop so wait_for can time it out.
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                    prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            # NOTE(review): every call writes the same "image.png"; the lock
            # serializes writers but a reader could still race a later writer.
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
254
+
255
def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    """Synchronous wrapper around infer() for gradio event handlers.

    Returns the saved image path, or None for the 'NA' placeholder slot or
    on any error. Fix: the event loop is now created *before* the try block
    — in the original, if asyncio.new_event_loop() raised, the `finally`
    clause hit an unbound `loop` and masked the real error with NameError.
    """
    if model_str == 'NA':
        return None
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result
269
+
270
+ def gen_fn_original(model_str, prompt):
271
  if model_str == 'NA':
272
  return None
273
  noise = str(randint(0, 9999))
 
275
  m=models_load[model_str](f'{prompt} {noise}')
276
  except Exception as error :
277
  print("error : " + model_str)
278
+ print(error)
279
  m=False
280
 
281
  return m
282
 
283
+
284
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to the gallery list and return it.

    A None gallery becomes a fresh list; a None image is ignored.
    """
    if gallery is None:
        gallery = []
    #with lock:
    if image is not None:
        gallery.append((image, model_str))
    return gallery
289
 
290
def reset_gallery(gallery):
    """Return a fresh, empty gallery list (the argument is ignored; it exists
    only to satisfy the gradio handler signature)."""
    fresh = add_gallery(None, "", [])
    return fresh
292
+
293
def load_gallery(gallery):
    """Rebuild the gallery from every cached (image, model) pair of the session."""
    result = reset_gallery(gallery)
    for img, model in cache_image:
        result = add_gallery(img, model, result)
    return result
298
def load_gallery_actu(gallery):
    """Rebuild the gallery from the current run's cache, sorted by model name."""
    result = reset_gallery(gallery)
    #for c in cache_image_actu:
    for img, model in sorted(cache_image_actu, key=itemgetter(1)):
        result = add_gallery(img, model, result)
    return result
304
+
305
def add_cache_image(o, m):
    """Record one (image, model) pair in the session-wide cache."""
    entry = (o, m)
    #cache_image=sorted(cache_image, key=itemgetter(1))
    cache_image.append(entry)
    return
309
def add_cache_image_actu(o, m):
    """Record one (image, model) pair in the current-run cache."""
    entry = (o, m)
    #cache_image_actu=sorted(cache_image_actu, key=itemgetter(1))
    cache_image_actu.append(entry)
    return
313
def reset_cache_image():
    """Empty the session-wide image cache in place."""
    del cache_image[:]
    return
316
def reset_cache_image_actu():
    """Empty the current-run image cache in place."""
    del cache_image_actu[:]
    return
319
+
320
def disp_models(group_model_choice, nb_rep=nb_rep):
    """Format the unique model names of a group as a quoted, comma-separated
    list, with a blank line after every (8 / nb_rep) entries."""
    # dict.fromkeys dedupes while preserving first-seen order, like the
    # original membership-test loop.
    unique_models = list(dict.fromkeys(group_model_choice))
    text = '\n'
    for idx, name in enumerate(unique_models, start=1):
        text += "\"" + name + "\",\n"
        if idx % (8/nb_rep) == 0:
            text += "\n"
    return gr.Textbox(label="models", value=text)
333
+
334
def search_models(str_search, tags_plus_models=tags_plus_models):
    """Search model names by substring, plus exact (case-insensitive) tag
    matches; returns a gr.Textbox with the combined result text."""
    by_name = "\n"
    # tags_plus_models[0][2] is assumed to hold the full model list — TODO confirm.
    for name in tags_plus_models[0][2]:
        if str_search in name:
            by_name += "\"" + name + "\",\n"
    tag_header = "\n From tags : \n\n"
    by_tag = ""
    for tag_entry in tags_plus_models:
        if str_search.lower() == tag_entry[0].lower() and str_search != "":
            for name in tag_entry[2]:
                by_tag += "\"" + name + "\",\n"
    if by_tag != "":
        combined = by_name + tag_header + by_tag
    else:
        combined = by_name
    return gr.Textbox(label="out", value=combined)
350
+
351
def search_info(txt_search_info, models_plus_tags=models_plus_tags):
    """Look up a model's tag list by exact name (optionally "quoted" input).

    Fix: the original did `outputList = m[1]`, aliasing the shared
    models_plus_tags entry — when a matched model had an empty tag list,
    the fallback append permanently polluted the shared data. A copy is
    taken instead.
    """
    if txt_search_info.find("\"") != -1:
        # Accept a quoted name and strip the quotes.
        start = txt_search_info.find("\"") + 1
        end = txt_search_info.find("\"", start)
        m_name = cutStrg(txt_search_info, start, end)
    else:
        m_name = txt_search_info
    outputList = []
    for m in models_plus_tags:
        if m_name == m[0]:
            outputList = list(m[1])  # copy — never mutate the shared entry
    if len(outputList) == 0:
        outputList.append("Model Not Find")
    return gr.Textbox(label="out", value=outputList)
365
+
366
+
367
def make_me():
    """Build the whole UI: prompt controls, the image grid, and the
    password-gated tools accordion.

    Uses/creates module globals: output_g, current_models_g, cache_image,
    cache_image_actu. NOTE(review): container nesting reconstructed from a
    diff view — confirm against the running app.
    """
    # with gr.Tab('The Dream'):
    with gr.Row():
        #txt_input = gr.Textbox(lines=3, width=300, max_height=100)
        #txt_input = gr.Textbox(label='Your prompt:', lines=3, width=300, max_height=100)
        with gr.Column(scale=4):
            with gr.Group():
                txt_input = gr.Textbox(label='Your prompt:', lines=3)
                # Optional parameters; value 0 (or -1 for seed) means "model default".
                with gr.Accordion("Advanced", open=False, visible=True):
                    neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
        #gen_button = gr.Button('Generate images', width=150, height=30)
        #stop_button = gr.Button('Stop', variant='secondary', interactive=False, width=150, height=30)
        gen_button = gr.Button('Generate images', scale=3)
        stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)

        # Re-enable Stop each time a generation starts.
        gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
        #gr.HTML("""
        #<div style="text-align: center; max-width: 100%; margin: 0 auto;">
        #  <body>
        #  </body>
        #</div>
        #""")
    with gr.Row():
        """output = [gr.Image(label=m, min_width=170, height=170) for m in default_models]
        current_models = [gr.Textbox(m, visible=False) for m in default_models]"""
        """choices=[models_test[0][0]]"""
        # First model group of the first tag drives the initial grid.
        choices=models_test[0][1][0][1]
        """output = [gr.Image(label=m, min_width=170, height=170) for m in choices]
        current_models = [gr.Textbox(m, visible=False) for m in choices]"""
        global output_g
        global current_models_g
        output_g = update_imgbox([choices[0]])
        current_models_g = extend_choices_b([choices[0]])

    # One generation event per (hidden model textbox, image box) pair;
    # Stop cancels the pending event.
    for m, o in zip(current_models_g, output_g):
        gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                          inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o])
        stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])

    with gr.Row():
        txt_input_p = gr.Textbox(label="Pass", lines=1)
        test_button = gr.Button(' ')

    # Hidden tool panel; revealed by test_pass_aff on a correct password.
    with gr.Accordion( open=True, visible=False) as stuffs:
        with gr.Accordion("Gallery",open=False):
            with gr.Row():
                global cache_image
                global cache_image_actu
                cache_image=[]       # every image generated this session
                cache_image_actu=[]  # images from the current generation run
                with gr.Column():
                    b11 = gr.Button('Load Galerry Actu')
                    b12 = gr.Button('Load Galerry All')
                gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                     interactive=False, show_share_button=True, container=True, format="png",
                                     preview=True, object_fit="cover",columns=4,rows=4)
                with gr.Column():
                    b21 = gr.Button('Reset Gallery')
                    b22 = gr.Button('Reset Gallery All')
                b11.click(load_gallery_actu,gallery,gallery)
                b12.click(load_gallery,gallery,gallery)
                b21.click(reset_gallery,gallery,gallery)
                b22.click(reset_cache_image,[],gallery)
            # Mirror every finished image into both caches; a new run clears
            # only the "actu" cache.
            for m, o in zip(current_models_g, output_g):
                #o.change(add_gallery, [o, m, gallery], [gallery])
                o.change(add_cache_image,[o,m],[])
                o.change(add_cache_image_actu,[o,m],[])
            gen_button.click(reset_cache_image_actu, [], [])

        with gr.Group():
            with gr.Row():
                group_tag_choice = gr.Dropdown(label="Lists Tags", show_label=True, choices=list([]) , interactive = True)

            with gr.Row():
                group_model_choice = gr.Dropdown(label="List of Models with the chosen Tag", show_label=True, choices=list([]) , interactive = True)
                group_model_choice.change(choice_group_b,group_model_choice,output_g)
                group_model_choice.change(choice_group_c,group_model_choice,current_models_g)
                group_tag_choice.change(tag_choice,group_tag_choice,group_model_choice)

            with gr.Row():
                txt_list_models=gr.Textbox(label="Models Actu",value="")
                group_model_choice.change(disp_models,group_model_choice,txt_list_models)

            with gr.Row():
                txt_list_perso = gr.Textbox(label='List Models Perso')

                button_list_perso = gr.Button('Load')
                button_list_perso.click(aff_models_perso_b,txt_list_perso,output_g)
                button_list_perso.click(aff_models_perso_c,txt_list_perso,current_models_g)

            with gr.Row():
                txt_search = gr.Textbox(label='Search in')
                txt_output_search = gr.Textbox(label='Search out')
                button_search = gr.Button('Research')
                button_search.click(search_models,txt_search,txt_output_search)

            with gr.Row():
                txt_search_info = gr.Textbox(label='Search info in')
                txt_output_search_info = gr.Textbox(label='Search info out')
                button_search_info = gr.Button('Research info')
                button_search_info.click(search_info,txt_search_info,txt_output_search_info)

    with gr.Row():
        # Password submit: reveal the tool panel and populate the tag list.
        test_button.click(test_pass_aff,txt_input_p,stuffs)
        test_button.click(test_pass,txt_input_p,group_tag_choice)
        gr.HTML("""
        <div class="footer">
        <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier!
        </p>
        """)
 
 
487
 
488
  js_code = """
489
 
 
493
  """
494
 
495
 
496
# Top-level app. The CSS rule hides gradio's floating block labels.
with gr.Blocks(theme="Nymbo/Nymbo_Theme", fill_width=True, css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo:
    gr.Markdown("<script>" + js_code + "</script>")
    make_me()


# https://www.gradio.app/guides/setting-up-a-demo-for-maximum-performance
#demo.queue(concurrency_count=999) # concurrency_count is deprecated in 4.x
demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(max_threads=400)
+ demo.launch(max_threads=400)