Commit 0681d43 by yiyixuxu (1 parent: 85c218e)

add img to 3d model

image_encoder/config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "openai/clip-vit-large-patch14",
+   "architectures": [
+     "CLIPVisionModel"
+   ],
+   "attention_dropout": 0.0,
+   "dropout": 0.0,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 1024,
+   "image_size": 224,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "model_type": "clip_vision_model",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 24,
+   "patch_size": 14,
+   "projection_dim": 768,
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.2"
+ }
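
This config pins the image encoder to the CLIP ViT-L/14 vision tower (hidden_size 1024, 24 layers, patch size 14). A minimal sketch of loading it standalone with transformers, assuming a placeholder repo id and the subfolder layout from this commit:

```python
from transformers import CLIPVisionModel

# "openai/shap-e-img2img" is a placeholder; substitute this repository's actual Hub id.
image_encoder = CLIPVisionModel.from_pretrained(
    "openai/shap-e-img2img", subfolder="image_encoder"
)
print(image_encoder.config.hidden_size)  # 1024, as in the config above
```
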
image_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49767fbe1b9ea4688056a86db9404053b9a34c3f6a95776cb85622e7ea8a35ae
+ size 1212850925
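
The weights are stored as a Git LFS pointer: the oid is the SHA-256 of the actual binary and size is its byte count. A small sketch (plain hashlib, no extra dependencies) for checking a downloaded file against the pointer:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Should match the oid recorded in the pointer above.
expected = "49767fbe1b9ea4688056a86db9404053b9a34c3f6a95776cb85622e7ea8a35ae"
print(sha256_of("image_encoder/pytorch_model.bin") == expected)
```
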
image_processor/preprocessor_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "crop_size": 224,
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "size": 224
+ }
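
These are the standard CLIP preprocessing settings: resize and 224x224 center crop, bicubic resampling (resample 3 is PIL's BICUBIC), and CLIP mean/std normalization. A sketch of applying it to a PIL image, again with a placeholder repo id:

```python
from PIL import Image
from transformers import CLIPImageProcessor

# Placeholder repo id; subfolder matches this commit's layout.
processor = CLIPImageProcessor.from_pretrained(
    "openai/shap-e-img2img", subfolder="image_processor"
)
image = Image.open("input.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```
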
model_index.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_class_name": "ShapEPriorPipeline",
+   "_diffusers_version": "0.17.0.dev0",
+   "scheduler": [
+     "diffusers",
+     "HeunDiscreteScheduler"
+   ],
+   "image_encoder": [
+     "transformers",
+     "CLIPVisionModel"
+   ],
+   "image_processor": [
+     "transformers",
+     "CLIPImageProcessor"
+   ],
+   "prior": [
+     "diffusers",
+     "PriorTransformer"
+   ],
+   "params_proj": [
+     "shap_e",
+     "ShapEParamsProjModel"
+   ],
+   "renderer": [
+     "shap_e",
+     "MLPNeRSTFModel"
+   ]
+ }
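
model_index.json is what DiffusionPipeline.from_pretrained reads to resolve each component's library and class. A loading sketch with a placeholder repo id; note the class name here (ShapEPriorPipeline) was written against 0.17.0.dev0, so the installed diffusers must expose a matching pipeline class (the released image-to-3D Shap-E pipeline is named ShapEImg2ImgPipeline):

```python
import torch
from diffusers import DiffusionPipeline

# Placeholder repo id; requires a diffusers version that provides the pipeline
# class named in model_index.json.
pipe = DiffusionPipeline.from_pretrained(
    "openai/shap-e-img2img", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
```
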
params_proj/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_class_name": "ShapEParamsProjModel",
+   "_diffusers_version": "0.17.0.dev0",
+   "d_latent": 1024,
+   "param_names": [
+     "nerstf.mlp.0.weight",
+     "nerstf.mlp.1.weight",
+     "nerstf.mlp.2.weight",
+     "nerstf.mlp.3.weight"
+   ],
+   "param_shapes": [
+     [
+       256,
+       93
+     ],
+     [
+       256,
+       256
+     ],
+     [
+       256,
+       256
+     ],
+     [
+       256,
+       256
+     ]
+   ]
+ }
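
ShapEParamsProjModel turns the d_latent=1024 latent into weights for the renderer's NeRSTF MLP; param_names and param_shapes list which matrices it produces and their shapes (PyTorch convention: out_features x in_features, so the 256 rows line up with the renderer's d_hidden=256 below). A quick arithmetic check of the flattened sizes, for orientation only:

```python
# Sizes of the projected weight matrices listed in param_shapes.
param_shapes = [(256, 93), (256, 256), (256, 256), (256, 256)]
flat_sizes = [rows * cols for rows, cols in param_shapes]
print(flat_sizes)       # [23808, 65536, 65536, 65536]
print(sum(flat_sizes))  # 220416 projected MLP parameters in total
```
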
params_proj/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f58082e9d56a2f137d18c59698c2035b447e3e2606d4441e58d8ee82cc91250
+ size 903717877
prior/config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "_class_name": "PriorTransformer",
+   "_diffusers_version": "0.18.0.dev0",
+   "additional_embeddings": 0,
+   "attention_head_dim": 128,
+   "clip_embedding_dim": 1024,
+   "dropout": 0.0,
+   "embedding_dim": 1024,
+   "norm_embedding_proj": true,
+   "num_attention_heads": 8,
+   "num_embeddings": 1024,
+   "num_layers": 24,
+   "out_dim": 2048,
+   "time_embed_act_fn": "gelu",
+   "time_embed_dim": 4096
+ }
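
The prior is the diffusion transformer that denoises the Shap-E latent, here 1024 tokens of dimension 1024 (num_embeddings x embedding_dim), conditioned on a CLIP image embedding of width 1024 (clip_embedding_dim). Its attention width is num_attention_heads x attention_head_dim = 8 x 128 = 1024, matching embedding_dim. A sketch of loading it on its own, placeholder repo id as before (these config options need diffusers >= 0.18):

```python
from diffusers import PriorTransformer

# Placeholder repo id; subfolder matches this commit's layout.
prior = PriorTransformer.from_pretrained(
    "openai/shap-e-img2img", subfolder="prior"
)
# 8 heads x 128 dims per head = 1024, matching embedding_dim.
print(prior.config.num_attention_heads * prior.config.attention_head_dim)
```
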
prior/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f8d21a0e7a7fc9963d3219db3bfb9a35be724405bbb65a9eed2d481c64298c6
+ size 1263977743
renderer/config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_class_name": "MLPNeRSTFModel",
+   "_diffusers_version": "0.17.0.dev0",
+   "act_fn": "swish",
+   "d_hidden": 256,
+   "insert_direction_at": 4,
+   "n_hidden_layers": 6,
+   "n_output": 12
+ }
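
MLPNeRSTFModel is the small per-sample NeRF/STF MLP: 6 hidden layers of width 256 with swish activations, the view direction presumably concatenated at layer index 4, and 12 output channels; the first four of its weight matrices are the ones produced by params_proj above. The sketch below is purely illustrative of that wiring, not the diffusers implementation, and the assumed 93 input features are inferred from the (256, 93) first-layer weight shape in params_proj/config.json:

```python
import torch
from torch import nn

class NeRSTFMLPSketch(nn.Module):
    """Illustrative MLP with the shape described by renderer/config.json."""

    def __init__(self, d_in=93, d_dir=3, d_hidden=256, n_hidden_layers=6,
                 insert_direction_at=4, n_output=12):
        super().__init__()
        widths = [d_in] + [d_hidden] * n_hidden_layers
        self.layers = nn.ModuleList(
            nn.Linear(widths[i] + (d_dir if i == insert_direction_at else 0), widths[i + 1])
            for i in range(n_hidden_layers)
        )
        self.out = nn.Linear(d_hidden, n_output)
        self.act = nn.SiLU()  # "swish"
        self.insert_direction_at = insert_direction_at

    def forward(self, x, direction):
        h = x
        for i, layer in enumerate(self.layers):
            if i == self.insert_direction_at:
                # Concatenate the view direction at the configured layer.
                h = torch.cat([h, direction], dim=-1)
            h = self.act(layer(h))
        return self.out(h)

model = NeRSTFMLPSketch()
print(model(torch.randn(2, 93), torch.randn(2, 3)).shape)  # torch.Size([2, 12])
```
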
renderer/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bac1ac152675a9c58140f336aaa1e255b65188035eea4cbb32be2738c9b13de
+ size 1480443
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_class_name": "HeunDiscreteScheduler",
+   "_diffusers_version": "0.17.0.dev0",
+   "beta_schedule": "exp",
+   "trained_betas": null,
+   "num_train_timesteps": 1024,
+   "prediction_type": "sample",
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "use_karras_sigmas": false
+ }
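
The scheduler is HeunDiscreteScheduler with an exponential beta schedule, sample prediction, and 1024 training timesteps. A minimal loading sketch (placeholder repo id; the "exp" beta schedule needs a diffusers version that supports it):

```python
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler.from_pretrained(
    "openai/shap-e-img2img", subfolder="scheduler"
)
scheduler.set_timesteps(num_inference_steps=64)
print(len(scheduler.timesteps))
```
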