triphuong57 committed on
Commit
60d4d9b
β€’
1 Parent(s): ceb5b67

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -3,8 +3,10 @@ from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, BitsA
3
  from peft import PeftModel
4
  import spaces
5
  import torch
6
-
7
  device = "cuda"
 
 
 
8
  @spaces.GPU(duration=120)
9
  def greet(image, prompt):
10
 
@@ -12,11 +14,11 @@ def greet(image, prompt):
12
  load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
13
  )
14
 
15
- # base_model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-mix-224").to(device)
16
- model = PaliGemmaForConditionalGeneration.from_pretrained("/folders", torch_dtype=torch.float16, quantization_config=quantization_config).to(device)
17
- return prompt
18
- # processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
19
 
 
 
 
 
20
  # # model = PeftModel(base_model, "/folders").to(device)
21
  # inputs = processor(prompt, raw_image, return_tensors="pt")
22
  # output = model.generate(**inputs, max_new_tokens=20)
 
3
  from peft import PeftModel
4
  import spaces
5
  import torch
 
6
  device = "cuda"
7
+ base_model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-mix-224").to(device)
8
+ processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
9
+
10
  @spaces.GPU(duration=120)
11
  def greet(image, prompt):
12
 
 
14
  load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
15
  )
16
 
 
 
 
 
17
 
18
+ # model = PaliGemmaForConditionalGeneration.from_pretrained("/folders", torch_dtype=torch.float16, quantization_config=quantization_config).to(device)
19
+
20
+
21
+ return prompt
22
  # # model = PeftModel(base_model, "/folders").to(device)
23
  # inputs = processor(prompt, raw_image, return_tensors="pt")
24
  # output = model.generate(**inputs, max_new_tokens=20)