Upload ONNX weights (original + 8-bit quantized)

#3 opened by Xenova (HF staff)
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cafcf770b06757c4eaced21b1a88e57fd2b66de01b8045f35f01535ba742e0f
+ size 176153355
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6648479275dfd0ede0f3a8abc20aa5c437b394681b05e5af6d268250aaf40f3
+ size 44403226
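
Both files are stored as Git LFS pointers, so the diff only records the object hash and byte size. After downloading, the actual payloads can be checked against the pointers' `oid sha256` and `size` fields. A minimal sketch in Python, assuming the files have already been pulled into a local `onnx/` directory (the paths and the `EXPECTED` table below are copied from the pointers above, not from any official verification script):

```python
import hashlib
from pathlib import Path

# Expected values taken from the LFS pointers in this diff (oid sha256, size in bytes).
EXPECTED = {
    "onnx/model.onnx": (
        "8cafcf770b06757c4eaced21b1a88e57fd2b66de01b8045f35f01535ba742e0f",
        176153355,
    ),
    "onnx/model_quantized.onnx": (
        "a6648479275dfd0ede0f3a8abc20aa5c437b394681b05e5af6d268250aaf40f3",
        44403226,
    ),
}

def verify(path: str, oid: str, size: int) -> bool:
    """Return True if the local file matches the LFS pointer's size and sha256."""
    p = Path(path)
    if p.stat().st_size != size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == oid

for path, (oid, size) in EXPECTED.items():
    print(path, "OK" if verify(path, oid, size) else "MISMATCH")
```

The roughly 4x size reduction (176,153,355 bytes down to 44,403,226) is consistent with storing the weights as 8-bit integers, as the PR title states.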
onnx/quantize_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "per_channel": false,
+   "reduce_range": false,
+   "per_model_config": {
+     "model": {
+       "op_types": [
+         "Concat",
+         "MaxPool",
+         "Resize",
+         "Conv",
+         "Unsqueeze",
+         "Cast",
+         "Shape",
+         "Relu",
+         "Sigmoid",
+         "Gather",
+         "Constant",
+         "Slice",
+         "Add"
+       ],
+       "weight_type": "QUInt8"
+     }
+   }
+ }
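
The config mirrors the arguments of ONNX Runtime's dynamic quantization: unsigned 8-bit weights (`QUInt8`), no per-channel scales, and no reduced range, while `op_types` appears to simply record the operator types present in the exported graph. A minimal sketch of how `model_quantized.onnx` could be reproduced with `onnxruntime.quantization` (the input/output paths are assumptions, and the exact conversion script used for this upload may pass additional options):

```python
import onnxruntime as ort
from onnxruntime.quantization import QuantType, quantize_dynamic

# Dynamic (weight-only) 8-bit quantization matching quantize_config.json:
# per_channel=False, reduce_range=False, weight_type=QUInt8.
quantize_dynamic(
    model_input="onnx/model.onnx",             # full-precision export
    model_output="onnx/model_quantized.onnx",  # 8-bit weights
    per_channel=False,
    reduce_range=False,
    weight_type=QuantType.QUInt8,
)

# Sanity check: the quantized graph should still load.
ort.InferenceSession("onnx/model_quantized.onnx")
```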