Create app.py
app.py
ADDED
import os
import subprocess

import streamlit as st
from huggingface_hub import snapshot_download

# Recompile llama.cpp before running
subprocess.run(["make", "clean"], cwd="/home/user/app/llama.cpp", check=True)
subprocess.run(["make"], cwd="/home/user/app/llama.cpp", check=True)
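
# Note (assumption, not in the original commit): a plain `make` historically
# drops binaries in the llama.cpp repo root, while quantize_llama() below
# calls build/bin/llama-quantize. If the Space image has no prebuilt build/
# tree, a CMake build would produce that layout; a minimal sketch, assuming
# cmake is available in the image:
#
#   subprocess.run(["cmake", "-B", "build"], cwd="/home/user/app/llama.cpp", check=True)
#   subprocess.run(["cmake", "--build", "build", "--config", "Release"],
#                  cwd="/home/user/app/llama.cpp", check=True)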

def check_directory_path(directory_name: str) -> str:
    """Return the absolute path of `directory_name`, failing fast if it is missing."""
    if not os.path.exists(directory_name):
        raise FileNotFoundError(f"Directory not found: {directory_name}")
    return os.path.abspath(directory_name)

# Define quantization types
QUANT_TYPES = [
    "Q2_K", "Q3_K_M", "Q3_K_S", "Q4_K_M", "Q4_K_S",
    "Q5_K_M", "Q5_K_S", "Q6_K",
]
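
# In llama.cpp's K-quant naming, the digit is roughly bits per weight (Q2_K is
# the smallest and least accurate, Q6_K the closest to the f16 original), and
# the _S/_M suffixes trade file size against quality.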

# Absolute path to the local llama.cpp checkout
model_dir_path = check_directory_path("llama.cpp")

def download_model(hf_model_name, output_dir="models"):
    """
    Downloads a Hugging Face model and saves it locally.
    """
    st.write(f"📥 Downloading `{hf_model_name}` from Hugging Face...")
    os.makedirs(output_dir, exist_ok=True)
    snapshot_download(repo_id=hf_model_name, local_dir=output_dir, local_dir_use_symlinks=False)
    st.success("✅ Model downloaded successfully!")
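
# Note: snapshot_download() writes the snapshot directly into models/ and
# convert_to_gguf() later converts that whole directory, so each run assumes
# models/ holds exactly one model's files.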

def convert_to_gguf(model_dir, output_file):
    """
    Converts a Hugging Face model to GGUF format.
    """
    st.write(f"🔄 Converting `{model_dir}` to GGUF format...")
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    st.write(model_dir_path)
    cmd = [
        "python3", f"{model_dir_path}/convert_hf_to_gguf.py", model_dir,
        "--outtype", "f16", "--outfile", output_file,
    ]
    process = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process.returncode == 0:
        st.success(f"✅ Conversion complete: `{output_file}`")
    else:
        st.error(f"❌ Conversion failed: {process.stderr}")
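
# For reference, the subprocess call above is equivalent to this CLI sketch
# (model-f16.gguf is an illustrative output name):
#
#   python3 llama.cpp/convert_hf_to_gguf.py models --outtype f16 \
#       --outfile models/model-f16.gguf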

def quantize_llama(model_path, quantized_output_path, quant_type):
    """
    Quantizes a GGUF model.
    """
    st.write(f"⚡ Quantizing `{model_path}` with `{quant_type}` precision...")
    os.makedirs(os.path.dirname(quantized_output_path), exist_ok=True)
    quantize_path = f"{model_dir_path}/build/bin/llama-quantize"
    subprocess.run(["chmod", "+x", quantize_path], check=True)

    cmd = [quantize_path, model_path, quantized_output_path, quant_type]

    process = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    if process.returncode == 0:
        st.success(f"✅ Quantized model saved at `{quantized_output_path}`")
    else:
        st.error(f"❌ Quantization failed: {process.stderr}")
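
# Equivalent CLI sketch (llama-quantize takes input GGUF, output GGUF, type):
#
#   llama.cpp/build/bin/llama-quantize models/model-f16.gguf \
#       models/model-Q4_K_M.gguf Q4_K_M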

def automate_llama_quantization(hf_model_name, quant_type):
    """
    Orchestrates the entire quantization process.
    """
    output_dir = "models"
    gguf_file = os.path.join(output_dir, f"{hf_model_name.replace('/', '_')}.gguf")
    quantized_file = gguf_file.replace(".gguf", f"-{quant_type}.gguf")

    progress_bar = st.progress(0)

    # Step 1: Download
    st.write("### Step 1: Downloading Model")
    download_model(hf_model_name, output_dir)
    progress_bar.progress(33)

    # Step 2: Convert to GGUF
    st.write("### Step 2: Converting Model to GGUF Format")
    convert_to_gguf(output_dir, gguf_file)
    progress_bar.progress(66)

    # Step 3: Quantize Model
    st.write("### Step 3: Quantizing Model")
    quantize_llama(gguf_file, quantized_file, quant_type.lower())
    progress_bar.progress(100)

    st.success(f"🎉 All steps completed! Quantized model available at: `{quantized_file}`")
    return quantized_file

# Streamlit UI
st.title("🦙 LLaMA Model Quantization (llama.cpp)")

hf_model_name = st.text_input("Enter Hugging Face Model Name", "Qwen/Qwen2.5-1.5B")
quant_type = st.selectbox("Select Quantization Type", QUANT_TYPES)
start_button = st.button("🚀 Start Quantization")

if start_button:
    with st.spinner("Processing..."):
        quantized_model_path = automate_llama_quantization(hf_model_name, quant_type)

    if quantized_model_path:
        with open(quantized_model_path, "rb") as f:
            st.download_button("⬇️ Download Quantized Model", f, file_name=os.path.basename(quantized_model_path))
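
To try this app outside the Space (a sketch; it assumes a llama.cpp checkout next to app.py and the Python dependencies installed):

    pip install streamlit huggingface_hub
    git clone https://github.com/ggerganov/llama.cpp
    streamlit run app.py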