aiqcamp commited on
Commit
b122ddc
·
verified ·
1 Parent(s): 5bb223e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -32
app.py CHANGED
@@ -1,29 +1,15 @@
1
  import os,sys
 
 
2
 
3
  # install required packages
4
- os.system('pip install -q python-dotenv') # python-dotenv 설치 추가
5
- os.system('pip install -q transformers')
6
  os.system('pip install -q plotly')
7
  os.system('pip install -q matplotlib')
8
  os.system('pip install dgl==1.0.2+cu116 -f https://data.dgl.ai/wheels/cu116/repo.html')
9
  os.environ["DGLBACKEND"] = "pytorch"
10
  print('Modules installed')
11
 
12
- from huggingface_hub import HfApi
13
- from dotenv import load_dotenv
14
-
15
- # 환경 변수 로드 및 토큰 설정
16
- load_dotenv()
17
- HF_TOKEN = os.getenv("HF_TOKEN")
18
- if not HF_TOKEN:
19
- raise ValueError("HF_TOKEN not found in environment variables. Please set it in .env file")
20
-
21
- # Hugging Face API 설정
22
- os.environ["HUGGINGFACE_TOKEN"] = HF_TOKEN
23
- os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
24
-
25
  # 필수 라이브러리 임포트
26
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
27
  from datasets import load_dataset
28
  import plotly.graph_objects as go
29
  import numpy as np
@@ -38,18 +24,52 @@ from utils.parsers_inference import parse_pdb
38
  from model.util import writepdb
39
  from utils.inpainting_util import *
40
 
41
- # Cohere 모델 사용 (토큰 인증 포함)
42
- model_name = "CohereForAI/c4ai-command-r-plus-08-2024"
43
- tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
44
- model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
45
- pipe = pipeline("text-generation",
46
- model=model,
47
- tokenizer=tokenizer,
48
- trust_remote_code=True)
 
 
49
 
50
  # 데이터셋 로드
51
  ds = load_dataset("lamm-mit/protein_secondary_structure_from_PDB",
52
- token=HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
  # 챗봇 및 단백질 생성 관련 함수들
55
  def process_chat(message, history):
@@ -563,19 +583,41 @@ def combined_generation(name, strength, flexibility, speed, defense, size, abili
563
  None
564
  )
565
 
566
- # Gradio 인터페이스 수정
567
  with gr.Blocks(theme='ParityError/Interstellar') as demo:
568
  with gr.Row():
569
  # 왼쪽 열: 챗봇 및 컨트롤 패널
570
  with gr.Column(scale=1):
571
  # 챗봇 인터페이스
572
  gr.Markdown("# 🤖 AI 단백질 설계 도우미")
573
- chatbot = gr.Chatbot()
574
- msg = gr.Textbox(
575
- label="명령을 입력하세요 (예: COVID-19를 치료할 수 있는 단백질을 생성해주세요)",
576
- placeholder="여기에 입력하세요..."
577
- )
578
- clear = gr.Button("대화 내용 지우기")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
579
 
580
  # 탭 인터페이스
581
  with gr.Tabs():
@@ -907,6 +949,17 @@ with gr.Blocks(theme='ParityError/Interstellar') as demo:
907
  outputs=[hero_stats, hero_description, output_seq, output_pdb, output_viewer, plddt_plot]
908
  )
909
 
 
 
 
 
 
 
 
 
 
 
 
910
  # 실행
911
  demo.queue()
912
  demo.launch(debug=True)
 
1
# Standard-library imports — one per line per PEP 8 (original used `import os,sys`).
import os
import sys

from openai import OpenAI
import gradio as gr

# Install runtime packages at startup so the Space is self-contained.
# NOTE(review): `os.system('pip install ...')` at import time is fragile
# (no error checking); a requirements.txt would be preferable — left as-is
# because the deployment relies on it.
os.system('pip install -q plotly')
os.system('pip install -q matplotlib')
os.system('pip install dgl==1.0.2+cu116 -f https://data.dgl.ai/wheels/cu116/repo.html')
# DGL must see this before it is imported so it binds to the PyTorch backend.
os.environ["DGLBACKEND"] = "pytorch"
print('Modules installed')
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  # 필수 라이브러리 임포트
 
13
  from datasets import load_dataset
14
  import plotly.graph_objects as go
15
  import numpy as np
 
24
  from model.util import writepdb
25
  from utils.inpainting_util import *
26
 
27
# Hugging Face access token — required both for the inference endpoint
# and for the gated dataset download below.
ACCESS_TOKEN = os.getenv("HF_TOKEN")
if not ACCESS_TOKEN:
    raise ValueError("HF_TOKEN not found in environment variables")

# OpenAI-compatible client pointed at the Hugging Face serverless
# inference API (the HF router speaks the OpenAI chat-completions protocol).
client = OpenAI(
    api_key=ACCESS_TOKEN,
    base_url="https://api-inference.huggingface.co/v1/",
)

# Protein secondary-structure dataset, fetched with the same token.
ds = load_dataset(
    "lamm-mit/protein_secondary_structure_from_PDB",
    token=ACCESS_TOKEN,
)
41
+
42
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat reply from the HF-hosted Cohere model.

    Args:
        message: Latest user message.
        history: Prior ``(user, assistant)`` turn pairs; falsy entries
            (empty strings) are skipped.
        system_message: System prompt placed first in the conversation.
        max_tokens: Generation cap forwarded to the API.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The progressively accumulated assistant response after each
        streamed chunk (Gradio ChatInterface streaming convention).
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Loop variable renamed from `message` — the original shadowed the
    # `message` parameter inside the streaming loop.
    for chunk in client.chat.completions.create(
        model="CohereForAI/c4ai-command-r-plus-08-2024",
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        messages=messages,
    ):
        token = chunk.choices[0].delta.content
        # BUGFIX: streamed deltas may carry content=None (e.g. role-only
        # first chunk or the final stop chunk); the original
        # `response += token` raised TypeError on those. Skip empty deltas.
        if token:
            response += token
            yield response
73
 
74
  # 챗봇 및 단백질 생성 관련 함수들
75
  def process_chat(message, history):
 
583
  None
584
  )
585
 
 
586
  with gr.Blocks(theme='ParityError/Interstellar') as demo:
587
  with gr.Row():
588
  # 왼쪽 열: 챗봇 및 컨트롤 패널
589
  with gr.Column(scale=1):
590
  # 챗봇 인터페이스
591
  gr.Markdown("# 🤖 AI 단백질 설계 도우미")
592
+ chatbot = gr.Chatbot(height=600)
593
+
594
+ with gr.Accordion("채팅 설정", open=False):
595
+ system_message = gr.Textbox(
596
+ value="당신은 단백질 설계를 도와주는 전문가입니다.",
597
+ label="시스템 메시지"
598
+ )
599
+ max_tokens = gr.Slider(
600
+ minimum=1,
601
+ maximum=2048,
602
+ value=512,
603
+ step=1,
604
+ label="최대 토큰 수"
605
+ )
606
+ temperature = gr.Slider(
607
+ minimum=0.1,
608
+ maximum=4.0,
609
+ value=0.7,
610
+ step=0.1,
611
+ label="Temperature"
612
+ )
613
+ top_p = gr.Slider(
614
+ minimum=0.1,
615
+ maximum=1.0,
616
+ value=0.95,
617
+ step=0.05,
618
+ label="Top-P"
619
+ )
620
+
621
 
622
  # 탭 인터페이스
623
  with gr.Tabs():
 
949
  outputs=[hero_stats, hero_description, output_seq, output_pdb, output_viewer, plddt_plot]
950
  )
951
 
952
+ chat_interface = gr.ChatInterface(
953
+ respond,
954
+ additional_inputs=[
955
+ system_message,
956
+ max_tokens,
957
+ temperature,
958
+ top_p,
959
+ ],
960
+ chatbot=chatbot,
961
+ )
962
+
963
  # 실행
964
  demo.queue()
965
  demo.launch(debug=True)