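# Gradio Space that chats with kimou605/shadow-clown-BioMistral-7B-DARE,
# loaded in 4-bit NF4 via bitsandbytes and served through a streaming ChatInterface.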
import os
import subprocess
import sys
from threading import Thread

import gradio as gr
import spaces
import torch
import transformers
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    TextIteratorStreamer,
)
#################################################################
# Model config and tokenizer
#################################################################
model_name = "kimou605/shadow-clown-BioMistral-7B-DARE"
model_config = transformers.AutoConfig.from_pretrained(
    model_name,
)
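# Mistral-style tokenizers usually ship without a dedicated pad token,
# so fall back to the EOS token for padding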
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
#################################################################
# bitsandbytes parameters
#################################################################
# Activate 4-bit precision base model loading
use_4bit = True
# Compute dtype for 4-bit base models
bnb_4bit_compute_dtype = "float16"
# Quantization type (fp4 or nf4)
bnb_4bit_quant_type = "nf4"
# Activate nested quantization for 4-bit base models (double quantization)
use_nested_quant = True
#################################################################
# Set up quantization config
#################################################################
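# NF4 weights with float16 compute; double quantization also compresses the
# quantization constants, saving roughly 0.4 bits per parameter (QLoRA paper)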
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=use_4bit,
    bnb_4bit_quant_type=bnb_4bit_quant_type,
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=use_nested_quant,
)
# Check GPU compatibility with bfloat16 (guarded so a CPU-only build doesn't crash)
if compute_dtype == torch.float16 and use_4bit and torch.cuda.is_available():
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("=" * 80)
        print("Your GPU supports bfloat16: accelerate training with bf16=True")
        print("=" * 80)
#################################################################
# Load the pre-trained model (4-bit quantized)
#################################################################
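# device_map="auto" lets accelerate place the quantized weights on the available GPU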
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
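# Wrap the quantized model in a text-generation pipeline; per-request sampling
# settings are passed in respond() below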
pipe = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,
)
# Quick smoke test (uncomment to run once at startup):
# messages = [{"role": "user", "content": "what is ai"}]
# prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# outputs = pipe(prompt, max_new_tokens=1024, do_sample=True, temperature=0.01, top_k=1, top_p=0.001)[0]["generated_text"]
# print(outputs)
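# Chat callback for gr.ChatInterface: rebuild the conversation, apply the model's
# chat template, and stream generated tokens back to the UI as they arrive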
@spaces.GPU  # allocate a GPU slot for the duration of each generation call
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Make sure a recent bitsandbytes build is available on the GPU worker
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "bitsandbytes"])
    # Mistral-style chat templates expect strictly alternating user/assistant
    # turns, so fold the system prompt into the first user message
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    if system_message:
        messages[0]["content"] = f"{system_message}\n\n{messages[0]['content']}"
    # Render the conversation with the model's chat template
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Run generation in a background thread and stream tokens as they are produced
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = dict(
        text_inputs=prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        streamer=streamer,
    )
    thread = Thread(target=pipe, kwargs=generation_kwargs)
    thread.start()
    response = ""
    for token in streamer:
        response += token
        yield response
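# The extra controls below map one-to-one onto respond()'s additional parameters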
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    css="footer{display:none !important}",
)
if __name__ == "__main__":
    demo.launch()