triphuong57 committed on
Commit
ebb8d22
β€’
1 Parent(s): 60d4d9b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -3,16 +3,20 @@ from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, BitsA
3
  from peft import PeftModel
4
  import spaces
5
  import torch
 
 
 
 
6
  device = "cuda"
7
- base_model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-mix-224").to(device)
8
  processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
9
 
10
  @spaces.GPU(duration=120)
11
  def greet(image, prompt):
12
 
13
- quantization_config = BitsAndBytesConfig(
14
- load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
15
- )
16
 
17
 
18
  # model = PaliGemmaForConditionalGeneration.from_pretrained("/folders", torch_dtype=torch.float16, quantization_config=quantization_config).to(device)
 
3
  from peft import PeftModel
4
  import spaces
5
  import torch
6
+ from huggingface_hub.hf_api import HfFolder
7
+ import os
8
+ token = os.getenv('token')
9
+ HfFolder.save(token)
10
  device = "cuda"
11
+ base_model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-mix-224")
12
  processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
13
 
14
  @spaces.GPU(duration=120)
15
  def greet(image, prompt):
16
 
17
+ # quantization_config = BitsAndBytesConfig(
18
+ # load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
19
+ # )
20
 
21
 
22
  # model = PaliGemmaForConditionalGeneration.from_pretrained("/folders", torch_dtype=torch.float16, quantization_config=quantization_config).to(device)