mmcquade committed on
Commit
bdad13c
1 Parent(s): f09b74d

updates to files

Browse files
Files changed (2) hide show
  1. app.py +10 -29
  2. requirements.txt +0 -3
app.py CHANGED
@@ -1,37 +1,18 @@
1
  #python3
 
2
  #build a text summarizer using hugging face and gradio
3
 
4
-
5
  import gradio as gr
6
- import pandas as pd
7
- import numpy as np
8
- import tensorflow as tf
9
  import transformers
10
- from transformers import TFAutoModel, AutoTokenizer
11
-
12
- model_class, tokenizer_class, pretrained_weights = (TFAutoModel, AutoTokenizer, 'bert-base-uncased')
13
-
14
- # Load pretrained model/tokenizer
15
- tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
16
- model = model_class.from_pretrained(pretrained_weights)
17
-
18
- def get_summary(article):
19
- article_input_ids = tokenizer.encode(article, return_tensors='tf')
20
- summary_ids = model.generate(article_input_ids)
21
- summary_txt = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
22
- return summary_txt
23
 
24
- def get_summary_gradio(article):
25
- return get_summary(article)
26
 
27
- iface = gr.Interface(get_summary_gradio, "textbox", "textbox", live=True,
28
- examples=[
29
- ["The quick brown fox jumps over the lazy dog."],
30
- ["The world is a strange place. Sometimes, things are what they seem. But then, if you look closer, they can become something entirely different."],
31
- ["The sky is clear; the stars are twinkling. I'm going to bed now. Good night."],
32
- ["The president of the United States, and the president of the United Kingdom, have both been in the White House."],
33
- ["The president of the United States, and the president of the United Kingdom, have both been in the White House."]
34
- ])
35
 
36
- if __name__ == "__main__":
37
- iface.launch()
 
1
  #python3
2
+ #pytorch
3
  #build a text summarizer using hugging face and gradio
4
 
 
5
  import gradio as gr
 
 
 
6
  import transformers
7
+ from transformers import BartTokenizer, BartForConditionalGeneration
 
 
 
 
 
 
 
 
 
 
 
 
# Load the BART checkpoint fine-tuned for CNN/DailyMail summarization and
# its matching tokenizer from the Hugging Face hub.
# NOTE(review): this runs at import time and downloads the weights on the
# first run — expect a slow cold start.
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
12
def bart_summarizer(input_text):
    """Summarize a document with BART (facebook/bart-large-cnn).

    Parameters
    ----------
    input_text : str
        The source document. It is tokenized and truncated to the
        model's 1024-token input limit.

    Returns
    -------
    str
        A beam-search summary (4 beams, at most 100 generated tokens).
    """
    # truncation=True is the fix: without it, inputs longer than 1024
    # tokens are NOT cut down and model.generate fails once positions
    # exceed BART's learned position embeddings.
    encoded = tokenizer.batch_encode_plus(
        [input_text], max_length=1024, truncation=True, return_tensors='pt'
    )
    summary_ids = model.generate(
        encoded['input_ids'], num_beams=4, max_length=100, early_stopping=True
    )
    # Decode each returned sequence back to plain text; with a single
    # input there is exactly one summary, so return it directly.
    summaries = [
        tokenizer.decode(g, skip_special_tokens=True,
                         clean_up_tokenization_spaces=False)
        for g in summary_ids
    ]
    return summaries[0]
 
 
 
17
 
18
# Web demo: a 7-line textbox in, a textbox out, summarizing live as the
# user types.
iface = gr.Interface(
    fn=bart_summarizer,
    inputs=gr.inputs.Textbox(lines=7, placeholder="Enter some long text here"),
    outputs="textbox",
    live=True,
)
iface.launch()
 
requirements.txt CHANGED
@@ -1,5 +1,2 @@
1
- tensorflow
2
- gradio
3
- numpy
4
  transformers
5
  torch
 
 
 
 
1
  transformers
2
  torch