John6666 committed · Commit 12b1fe6 · verified · 1 Parent(s): f87e288

Upload 3 files

Files changed (3):
  1. app.py +21 -14
  2. hfconstants.py +5 -0
  3. hfsearch.py +62 -21
app.py CHANGED
@@ -14,15 +14,20 @@ with gr.Blocks(theme="NoCrypt/miku", fill_width=True, css=CSS) as demo:
gr.Markdown("# Search Hugging Face🤗", elem_classes="title")
with gr.Column():
search_result = gr.State(value=HFSearchResult())
+ ui_mode = gr.Radio(label="Mode", choices=["PC", "Phone"], value="Phone")
with gr.Tab("Normal Search"):
with gr.Group():
with gr.Row(equal_height=True):
repo_types = gr.CheckboxGroup(label="Repo type", choices=["model", "dataset", "space", "collection"], value=["model", "dataset", "space"])
filter_str = gr.Textbox(label="Filter", info="String(s) to filter repos", value="")
+ sort = gr.Radio(label="Sort", choices=["last_modified", "likes", "downloads", "trending_score"], value="likes")
+
with gr.Accordion("Advanced", open=False):
with gr.Row(equal_height=True):
search_str = gr.Textbox(label="Search", info="A string that will be contained in the returned repo ids", placeholder="bert", value="", lines=1)
- author = gr.Textbox(label="Author", info="The author (user or organization)", value="", lines=1)
+ #author = gr.Textbox(label="Author", info="The author (user or organization)", value="", lines=1)
+ author = HuggingfaceHubSearch(label="Author", placeholder="The author (user or organization)", search_type="user", sumbit_on_select=False)
+ followed = HuggingfaceHubSearch(label="Followed by", placeholder="Extract repos followed by this user", search_type="user", sumbit_on_select=False)
with gr.Column():
tags = gr.Textbox(label="Tags", info="Tag(s) to filter repos", value="")
with gr.Accordion("Tag input assistance", open=False):
@@ -33,9 +38,11 @@ with gr.Blocks(theme="NoCrypt/miku", fill_width=True, css=CSS) as demo:
subtag_cat = gr.Dropdown(label="Category", choices=get_subtag_categories(), value=get_subtag_categories()[0], scale=2)
subtag_item = gr.Dropdown(label="Item", choices=[""], value="", allow_custom_value=True, scale=2)
subtug_btn = gr.Button("Add", scale=1)
- with gr.Column():
- gated_status = gr.Radio(label="Gated status", choices=["gated", "non-gated", "all"], value="all")
- appr_status = gr.CheckboxGroup(label="Approval method", choices=["auto", "manual"], value=["auto", "manual"])
+ with gr.Row(equal_height=True):
+ with gr.Column():
+ gated_status = gr.Radio(label="Gated status", choices=["gated", "non-gated", "all"], value="all")
+ appr_status = gr.CheckboxGroup(label="Approval method", choices=["auto", "manual"], value=["auto", "manual"])
+ limit = gr.Number(label="Limit", info="If 0, fetches all models", value=1000, step=1, minimum=0, maximum=10000000)
with gr.Tab("for Models"):
with gr.Column():
infer_status = gr.Radio(label="Inference status", choices=["warm", "cold", "frozen", "all"], value="all")
@@ -54,13 +61,12 @@ with gr.Blocks(theme="NoCrypt/miku", fill_width=True, css=CSS) as demo:
with gr.Row(equal_height=True):
hardware = gr.CheckboxGroup(label="Specify hardware", choices=SPACE_HARDWARES, value=[])
stage = gr.CheckboxGroup(label="Specify stage", choices=SPACE_STAGES, value=[])
- with gr.Row(equal_height=True):
- sort = gr.Radio(label="Sort", choices=["last_modified", "likes", "downloads", "trending_score"], value="likes")
- sort_method = gr.Radio(label="Sort method", choices=["ascending order", "descending order"], value="ascending order")
- limit = gr.Number(label="Limit", info="If 0, fetches all models", value=1000, step=1, minimum=0, maximum=10000000)
- fetch_detail = gr.CheckboxGroup(label="Fetch detail", choices=["Space Runtime"], value=["Space Runtime"])
- with gr.Row(equal_height=True):
- show_labels = gr.CheckboxGroup(label="Show items", choices=get_labels(), value=get_valid_labels())
+ with gr.Accordion("More Advanced", open=False):
+ with gr.Row(equal_height=True):
+ sort_method = gr.Radio(label="Sort method", choices=["ascending order", "descending order"], value="ascending order")
+ fetch_detail = gr.CheckboxGroup(label="Fetch detail", choices=["Space Runtime"], value=["Space Runtime"])
+ with gr.Row(equal_height=True):
+ show_labels = gr.CheckboxGroup(label="Show items", choices=get_labels(), value=get_valid_labels())
run_button = gr.Button("Search", variant="primary")
with gr.Tab("Find Serverless Inference API enabled models"):
with gr.Group():
@@ -97,11 +103,11 @@ with gr.Blocks(theme="NoCrypt/miku", fill_width=True, css=CSS) as demo:
result_df = gr.DataFrame(label="Results", type="pandas", value=None, interactive=False)

run_button.click(search, [repo_types, sort, sort_method, filter_str, search_str, author, tags, infer_status, gated_status, appr_status,
- size_categories, limit, hardware, stage, fetch_detail, show_labels, search_result],
+ size_categories, limit, hardware, stage, followed, fetch_detail, show_labels, ui_mode, search_result],
[result_df, hide_labels, search_result])\
.success(update_filter, [filter_item1, search_result], [filter_item1, filter1, filter_btn, search_result], queue=False)
infer_run_button.click(search, [infer_repo_types, sort, sort_method, filter_str, search_str, author, tags, infer_infer_status, infer_gated_status, infer_appr_status,
- size_categories, limit, hardware, stage, fetch_detail, show_labels, search_result],
+ size_categories, limit, hardware, stage, followed, fetch_detail, show_labels, ui_mode, search_result],
[result_df, hide_labels, search_result])\
.success(update_filter, [filter_item1, search_result], [filter_item1, filter1, filter_btn, search_result], queue=False)
gr.on(triggers=[hide_labels.change, filter_btn.click], fn=update_df, inputs=[hide_labels, filter_item1, filter1, search_result],
@@ -110,7 +116,8 @@ with gr.Blocks(theme="NoCrypt/miku", fill_width=True, css=CSS) as demo:
subtag_cat.change(update_subtag_items, [subtag_cat], [subtag_item], queue=False, show_api=False)
subtug_btn.click(update_subtags, [tags, subtag_cat, subtag_item], [tags], queue=False, show_api=False)
tag_btn.click(update_tags, [tags, tag_item], [tags], queue=False, show_api=False)
- gr.on(triggers=[rec_run_button.click, rec_repo_id.submit], fn=search_ref_repos, inputs=[rec_repo_id, rec_repo_types, rec_sort, rec_show_labels, rec_limit, search_result],
+ gr.on(triggers=[rec_run_button.click, rec_repo_id.submit], fn=search_ref_repos,
+ inputs=[rec_repo_id, rec_repo_types, rec_sort, rec_show_labels, rec_limit, ui_mode, search_result],
outputs=[result_df, hide_labels, search_result])

demo.queue().launch()
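The main UI change in app.py is the new ui_mode radio: in "Phone" mode the result table is collapsed to just the Type and ID columns, with the remaining details folded into the ID cell. Below is a minimal, self-contained sketch of that column-switching idea using only stock Gradio and pandas; the column names and sample row are illustrative and not taken from the app.

import gradio as gr
import pandas as pd

ALL_COLS = ["Type", "ID", "Likes", "DLs"]
PHONE_COLS = ["Type", "ID"]  # phone mode keeps only these two columns

def render(mode: str):
    # Illustrative sample row; the real app builds this frame from HfApi results.
    df = pd.DataFrame([["model", "bert-base-uncased", 123, 4567]], columns=ALL_COLS)
    cols = PHONE_COLS if mode == "Phone" else ALL_COLS
    return gr.update(value=df[cols], headers=cols)

with gr.Blocks() as demo:
    ui_mode = gr.Radio(label="Mode", choices=["PC", "Phone"], value="Phone")
    table = gr.DataFrame(label="Results")
    ui_mode.change(render, [ui_mode], [table])

demo.queue().launch()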
hfconstants.py CHANGED
@@ -6,6 +6,11 @@ SPACE_HARDWARES = ["cpu-basic", "zero-a10g", "cpu-upgrade", "t4-small", "l4x1",

SPACE_STAGES = ["RUNNING", "SLEEPING", "RUNTIME_ERROR", "PAUSED", "BUILD_ERROR", "CONFIG_ERROR", "BUILDING", "APP_STARTING", "RUNNING_APP_STARTING"]

+ SPACE_STAGES_EMOJI = {
+ "SLEEPING": "💤", "RUNTIME_ERROR": "💀", "PAUSED": "⏸️", "BUILD_ERROR": "💀",
+ "CONFIG_ERROR": "💀", "BUILDING": "🚧", "APP_STARTING": "🚧", "RUNNING_APP_STARTING": "🚧"
+ }
+
PIPELINES = ["text-generation", "text-to-image", "image-text-to-text", "fill-mask", "text-classification", "sentence-similarity", "automatic-speech-recognition",
"feature-extraction", "text-to-speech", "text2text-generation", "image-to-image", "text-to-video", "zero-shot-image-classification",
"image-classification", "image-to-video", "image-to-text", "token-classification", "translation", "time-series-forecasting", "audio-classification",
hfsearch.py CHANGED
@@ -8,7 +8,7 @@ import pandas as pd
import datetime
import json
import re
- from hfconstants import DS_SIZE_CATEGORIES, SPACE_HARDWARES, SPACE_STAGES, EMOJIS
+ from hfconstants import DS_SIZE_CATEGORIES, SPACE_HARDWARES, SPACE_STAGES, SPACE_STAGES_EMOJI, EMOJIS

@spaces.GPU
def dummy_gpu():
@@ -288,10 +288,18 @@ class HFSearchResult():
self.show_labels = []
self.filter_items = None
self.filters = None
+ self.phone_mode = True #
gc.collect()

def reset(self):
self.__init__()
+
+ def set_mode(self, mode: str):
+ if mode == "Phone": self.phone_mode = True
+ elif mode == "PC": self.phone_mode = False
+
+ def get_show_labels(self):
+ return ["Type", "ID"] if self.phone_mode else self.show_labels

def _set(self, data, label: str):
self.labels.set(label)
@@ -358,9 +366,10 @@ class HFSearchResult():
self._next()

def search(self, repo_types: list, sort: str, sort_method: str, filter_str: str, search_str: str, author: str, tags: str, infer: str, gated: str, appr: list[str],
- size_categories: list, limit: int, hardware: list, stage: list, fetch_detail: list, show_labels: list):
+ size_categories: list, limit: int, hardware: list, stage: list, followed: str, fetch_detail: list, show_labels: list, ui_mode="PC"):
try:
self.reset()
+ self.set_mode(ui_mode)
self.show_labels = show_labels.copy()
api = HfApi()
kwargs = {}
@@ -422,13 +431,15 @@ class HFSearchResult():
cols = api.list_collections(**ckwargs)
for col in cols:
self.add_item(col)
+ if followed: self.followed_by(followed)
self.sort(sort)
except Exception as e:
raise Exception(f"Search error: {e}") from e

- def search_ref_repos(self, repo_id: str, repo_types: str, sort: str, show_labels: list, limit=10):
+ def search_ref_repos(self, repo_id: str, repo_types: str, sort: str, show_labels: list, limit=10, ui_mode="PC"):
try:
self.reset()
+ self.set_mode(ui_mode)
self.show_labels = show_labels.copy()
api = HfApi()
if "model" in repo_types or "dataset" in repo_types or "space" in repo_types or "paper" in repo_types:
@@ -453,8 +464,8 @@ class HFSearchResult():
self._do_filter()
dflist = [[item.get(l, self.labels.get_null_value(t)) for l, t in zip(labels, label_types)] for item, is_hide in zip(self.item_list, self.item_hide_flags) if not is_hide]
df = self._to_pandas(dflist, labels)
- show_label_types = [t for l, t in zip(labels, label_types) if l not in self.hide_labels and l in self.show_labels]
- show_labels = [l for l in labels if l not in self.hide_labels and l in self.show_labels]
+ show_label_types = [t for l, t in zip(labels, label_types) if l not in self.hide_labels and l in self.get_show_labels()]
+ show_labels = [l for l in labels if l not in self.hide_labels and l in self.get_show_labels()]
return df, show_labels, show_label_types

def _to_pandas(self, dflist: list, labels: list):
@@ -478,8 +489,8 @@ class HFSearchResult():
if "AllDLs" in columns: sdf = rank_df(sdf, df, "AllDLs")
if "DLs" in columns: sdf = rank_df(sdf, df, "DLs")
if "Status" in columns:
- sdf.loc[df["Status"] == "warm", ["Type"]] = 'color: orange'
- sdf.loc[df["Status"] == "cold", ["Type"]] = 'color: dodgerblue'
+ sdf.loc[df["Status"] == "warm", ["Type", "Status"]] = 'color: orange'
+ sdf.loc[df["Status"] == "cold", ["Type", "Status"]] = 'color: dodgerblue'
if "Gated" in columns:
sdf.loc[df["Gated"] == "auto", ["Gated"]] = 'color: dodgerblue'
sdf.loc[df["Gated"] == "manual", ["Gated"]] = 'color: crimson'
@@ -496,12 +507,32 @@ class HFSearchResult():
sdf = sdf.drop(drop_columns, axis=1)
return sdf

- def id_to_md(df: pd.DataFrame):
- if df["Type"] == "collection": return f'[{df["User"]}/{df["Name"]}]({df["URL"]}){df["Emoji"]}'
- elif df["Type"] == "space": return f'[{df["Name"]} ({df["ID"]})]({df["URL"]}){df["Emoji"]}'
- elif df["Type"] == "paper": return f'[{df["Name"]} (arxiv:{df["ID"]})]({df["URL"]}){df["Emoji"]}'
- else: return f'[{df["ID"]}]({df["URL"]}){df["Emoji"]}'
-
+ def id_to_md(df: pd.DataFrame, verbose=False):
+ columns = list(df.index)
+ if df["Type"] == "collection": id = f'[{df["User"]}/{df["Name"]}]({df["URL"]}){df["Emoji"]}'
+ elif df["Type"] == "space": id = f'[{df["Name"]} ({df["ID"]})]({df["URL"]}){df["Emoji"]}'
+ elif df["Type"] == "paper": id = f'[{df["Name"]} (arxiv:{df["ID"]})]({df["URL"]}){df["Emoji"]}'
+ else: id = f'[{df["ID"]}]({df["URL"]}){df["Emoji"]}'
+ if verbose:
+ l = []
+ if "NFAA" in columns and df["NFAA"] == "True": l.append('🤐')
+ if "Likes" in columns and df["Likes"] > 0: l.append(f'💕:{df["Likes"]}')
+ if df["Type"] in ["model", "space", "dataset"]:
+ if "Trending" in columns and df["Trending"] > 0: l.append(f'trend:{df["Trending"]}')
+ if df["Type"] in ["model", "dataset"]:
+ if "DLs" in columns and df["DLs"] > 0: l.append(f'DL:{df["DLs"]}')
+ if "Gated" in columns and df["Gated"] in ["manual", "auto"]: l.append(f'🔑:{df["Gated"]}')
+ if df["Type"] == "model":
+ if "Status" in columns:
+ if df["Status"] == "warm": l.append(f'inference:🔥')
+ elif df["Status"] == "cold": l.append(f'inference:🧊')
+ if df["Type"] == "space":
+ if "Hardware" in columns and df["Hardware"] in SPACE_HARDWARES and df["Hardware"] != "cpu-basic": l.append(f'{df["Hardware"]}')
+ if "SDK" in columns: l.append(f'{df["SDK"]}')
+ if "Stage" in columns and df["Stage"] in SPACE_STAGES_EMOJI.keys(): l.append(f'{SPACE_STAGES_EMOJI[df["Stage"]]}')
+ if len(l) > 0: id += f" ({' '.join(l)})"
+ return id
+
def to_emoji(df: pd.DataFrame, label: str, key: str, emoji: str):
if df[label] == key: return f'{df["Emoji"]}{emoji}' if df["Emoji"] else f' {emoji}'
else: return df["Emoji"]
@@ -513,12 +544,12 @@ class HFSearchResult():
df["Emoji"] = df.apply(to_emoji, axis=1, label=label, key=key, emoji=emoji)
return df

- def format_md_df(df: pd.DataFrame):
- df["ID"] = df.apply(id_to_md, axis=1)
+ def format_md_df(df: pd.DataFrame, verbose=False):
+ df["ID"] = df.apply(id_to_md, axis=1, verbose=verbose)
return df

- hide_labels = [l for l in labels if l in self.hide_labels or l not in self.show_labels]
- df = format_md_df(apply_emoji_df(pd.DataFrame(dflist, columns=labels)))
+ hide_labels = [l for l in labels if l in self.hide_labels or l not in self.get_show_labels()]
+ df = format_md_df(apply_emoji_df(pd.DataFrame(dflist, columns=labels)), verbose=self.phone_mode)
ref_df = df.copy()
df = df.drop(hide_labels, axis=1).style.apply(highlight_df, axis=None, df=ref_df)
return df
@@ -568,9 +599,18 @@ class HFSearchResult():
if not key in self.labels.get()[0]: key = "Likes"
self.item_list, self.item_hide_flags, self.item_info_list = zip(*sorted(zip(self.item_list, self.item_hide_flags, self.item_info_list), key=lambda x: x[0][key], reverse=True))

+ def followed_by(self, user: str):
+ if not user: return
+ api = HfApi()
+ usernames = set([x.username for x in api.list_user_following(username=user)])
+ self.item_hide_flags = [True if i["ID"].split("/")[0] not in usernames else is_hide for i, is_hide in zip(self.item_list, self.item_hide_flags)]
+
def get_gr_df(self):
df, labels, label_types = self.get()
widths = self.labels.get_widths()
+ if self.phone_mode:
+ widths["Type"] = "10%"
+ widths["ID"] = "90%"
column_widths = [widths[l] for l in labels]
return gr.update(type="pandas", value=df, headers=labels, datatype=label_types, column_widths=column_widths, wrap=True)

@@ -595,18 +635,19 @@ class HFSearchResult():
return gr.update(choices=[""] + [t[0] for t in sorted(d.items(), key=lambda x : x[1])][:100], value="", visible=True)

def search(repo_types: list, sort: str, sort_method: str, filter_str: str, search_str: str, author: str, tags: str, infer: str,
- gated: str, appr: list[str], size_categories: list, limit: int, hardware: list, stage: list, fetch_detail: list, show_labels: list, r: HFSearchResult):
+ gated: str, appr: list[str], size_categories: list, limit: int, hardware: list, stage: list, followed: str,
+ fetch_detail: list, show_labels: list, ui_mode: str, r: HFSearchResult):
try:
r.search(repo_types, sort, sort_method, filter_str, search_str, author, tags, infer, gated, appr, size_categories,
- limit, hardware, stage, fetch_detail, show_labels)
+ limit, hardware, stage, followed, fetch_detail, show_labels, ui_mode)
return r.get_gr_df(), r.get_gr_hide_labels(), r
except Exception as e:
raise gr.Error(e)

- def search_ref_repos(repo_id: str, repo_types: list, sort: str, show_labels: list, limit, r: HFSearchResult):
+ def search_ref_repos(repo_id: str, repo_types: list, sort: str, show_labels: list, limit, ui_mode: str, r: HFSearchResult):
try:
if not repo_id: raise gr.Error("Input Repo ID")
- r.search_ref_repos(repo_id, repo_types, sort, show_labels, limit)
+ r.search_ref_repos(repo_id, repo_types, sort, show_labels, limit, ui_mode)
return r.get_gr_df(), r.get_gr_hide_labels(), r
except Exception as e:
raise gr.Error(e)
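The other functional addition is the "Followed by" filter: HFSearchResult.followed_by() hides every result whose owner is not followed by the given user, via HfApi.list_user_following. A standalone sketch of the same idea, operating on plain repo id strings instead of the app's internal item list; the repo ids and username below are hypothetical.

from huggingface_hub import HfApi

def filter_repos_by_following(repo_ids: list[str], user: str) -> list[str]:
    """Keep only repos whose namespace (the part before '/') is followed by `user`."""
    if not user:
        return repo_ids
    api = HfApi()
    followed = {u.username for u in api.list_user_following(username=user)}
    return [rid for rid in repo_ids if rid.split("/")[0] in followed]

# Hypothetical usage:
# filter_repos_by_following(["John6666/hfsearch", "someone/some-model"], "some-user")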