import os

import gradio as gr
import torch
from huggingface_hub.hf_api import HfFolder
from peft import PeftModel
from transformers import AutoProcessor, BitsAndBytesConfig, PaliGemmaForConditionalGeneration

import spaces

# Authenticate against the Hugging Face Hub so the (possibly gated) model
# repos below can be downloaded.
# NOTE(review): the env var name 'toke' looks like a typo of 'token' — confirm
# it matches the secret configured on the Space before renaming it.
token = os.getenv('toke')
if token:  # guard: HfFolder.save_token(None) raises instead of no-op'ing
    HfFolder.save_token(token)

device = "cuda"

# LoRA-finetuned PaliGemma checkpoint, moved to the GPU for inference.
model = PaliGemmaForConditionalGeneration.from_pretrained("triphuong57/paligemma_lora").to(device)
# Processor (tokenizer + image preprocessor) from the matching base model.
processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")


@spaces.GPU(duration=120)
def greet(image, prompt):
    """Generate a text answer for *image* conditioned on *prompt*.

    Args:
        image: PIL image from the Gradio input widget.
        prompt: Text prompt/question for the model.

    Returns:
        The generated text with the prompt tokens and special tokens stripped.
    """
    # BUG FIX: the processor returns CPU tensors; they must be moved to the
    # same device as the model or model.generate raises a device mismatch.
    model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
    input_len = model_inputs["input_ids"].shape[-1]
    with torch.inference_mode():
        generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
    # Slice off the echoed prompt tokens; decode only the newly generated part.
    decoded = processor.decode(generation[0][input_len:], skip_special_tokens=True)
    return decoded


title = "Demo BTL nhóm 8"
description = "Made by Nguyễn Quý Đang, Đỗ Minh Nhật, Vũ Vân Long"

demo = gr.Interface(
    fn=greet,
    inputs=[
        gr.Image(label="Upload image", sources=['upload', 'webcam'], type="pil"),
        gr.Text(),
    ],
    outputs="text",
    title=title,
    description=description,
)
demo.launch(share=True)