sanchit-gandhi HF staff commited on
Commit
62183fb
1 Parent(s): 7bef690

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +35 -0
README.md CHANGED
@@ -1,3 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  <hfoptions id="gpu-select">
2
  <hfoption id="CUDA GPU">
3
 
 
1
+ ```diff
2
+ import torch
3
 + from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 + from datasets import load_dataset
4
+
5
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
6
+ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
7
+
8
+ model_id = "openai/whisper-large-v3"
9
+
10
+ model = AutoModelForSpeechSeq2Seq.from_pretrained(
11
+ model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
12
+ ).to(device)
13
+
14
+ + model.generation_config.cache_implementation = "static"
15
+ + model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
16
+
17
+ processor = AutoProcessor.from_pretrained(model_id)
18
+
19
+ pipe = pipeline(
20
+ "automatic-speech-recognition",
21
+ model=model,
22
+ tokenizer=processor.tokenizer,
23
+ feature_extractor=processor.feature_extractor,
24
+ torch_dtype=torch_dtype,
25
+ device=device,
26
+ )
27
+
28
+ dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
29
+ sample = dataset[0]["audio"]
30
+
31
+ result = pipe(sample)
32
+ print(result["text"])
33
+ ```
34
+
35
+
36
  <hfoptions id="gpu-select">
37
  <hfoption id="CUDA GPU">
38