Spaces: totolook / (Runtime error)

KBaba7 committed 1d5810f (verified) · 1 parent: f9787e9

Update app.py

Files changed (1): app.py (+39 -2)
app.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import subprocess
 import streamlit as st
-from huggingface_hub import snapshot_download
+from huggingface_hub import snapshot_download, login
 
 def check_directory_path(directory_name: str) -> str:
     if os.path.exists(directory_name):
@@ -92,6 +92,30 @@ def automate_llama_quantization(hf_model_name, quant_type):
     st.success(f"🎉 All steps completed! Quantized model available at: `{quantized_file}`")
     return quantized_file
 
+def upload_to_huggingface(file_path, repo_id, token):
+    """
+    Uploads a file to Hugging Face Hub.
+    """
+    try:
+        # Log in to Hugging Face
+        login(token=token)
+
+        # Initialize HfApi
+        api = HfApi()
+
+        # Create the repository if it doesn't exist
+        api.create_repo(repo_id, exist_ok=True, repo_type="model")
+
+        # Upload the file
+        api.upload_file(
+            path_or_fileobj=file_path,
+            path_in_repo=os.path.basename(file_path),
+            repo_id=repo_id,
+        )
+        st.success(f"✅ File uploaded to Hugging Face: {repo_id}")
+    except Exception as e:
+        st.error(f"❌ Failed to upload file: {e}")
+
 # Streamlit UI
 st.title("🦙 LLaMA Model Quantization (llama.cpp)")
 
@@ -104,4 +128,17 @@ if start_button:
     quantized_model_path = automate_llama_quantization(hf_model_name, quant_type)
     if quantized_model_path:
         with open(quantized_model_path, "rb") as f:
-            st.download_button("⬇️ Download Quantized Model", f, file_name=os.path.basename(quantized_model_path))
+            st.download_button("⬇️ Download Quantized Model", f, file_name=os.path.basename(quantized_model_path))
+            upload_to_hf = st.checkbox("Upload to Hugging Face")
+
+            if upload_to_hf:
+                st.write("### Upload to Hugging Face")
+                repo_id = st.text_input("Enter Hugging Face Repository ID (e.g., 'username/repo-name')")
+                hf_token = st.text_input("Enter Hugging Face Token", type="password")
+
+                if st.button("📤 Upload to Hugging Face"):
+                    if repo_id and hf_token:
+                        with st.spinner("Uploading..."):
+                            upload_to_huggingface(quantized_model_path, repo_id, hf_token)
+                    else:
+                        st.warning("Please provide a valid repository ID and Hugging Face token.")
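One thing the diff surfaces: upload_to_huggingface calls HfApi(), but the updated import line only brings in snapshot_download and login, so the helper will raise a NameError the first time it runs; that would be consistent with the Space's current Runtime error status. Below is a minimal standalone sketch of the same upload flow with the import fixed, using only documented huggingface_hub calls; the function name upload_gguf, the repo id, and the file path are placeholders, not part of the commit.

import os
from huggingface_hub import HfApi  # HfApi must be imported alongside snapshot_download and login

def upload_gguf(file_path: str, repo_id: str, token: str):
    """Upload a single file to a Hugging Face model repo."""
    # Passing the token to HfApi avoids relying on a global login().
    api = HfApi(token=token)
    # Create the target repo if it does not exist yet; no-op otherwise.
    api.create_repo(repo_id=repo_id, repo_type="model", exist_ok=True)
    # Push the quantized file under its own basename, mirroring the app's behaviour.
    return api.upload_file(
        path_or_fileobj=file_path,
        path_in_repo=os.path.basename(file_path),
        repo_id=repo_id,
        repo_type="model",
    )  # returns the file URL (or a CommitInfo object in newer huggingface_hub releases)

# Hypothetical usage with a GGUF file produced by the quantization step:
# upload_gguf("quantized/model-q4_k_m.gguf", "username/my-gguf-repo", token="hf_xxx")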
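As a quick follow-up check, an uploaded file can be pulled back down with hf_hub_download; the repo id and filename here are placeholders.

from huggingface_hub import hf_hub_download

# Fetch the quantized model back from the (hypothetical) repo to confirm the upload worked.
local_path = hf_hub_download(
    repo_id="username/my-gguf-repo",
    filename="model-q4_k_m.gguf",
    repo_type="model",
)
print(f"Downloaded to: {local_path}")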