Tags: PyTorch · llama · alignment-handbook · Generated from Trainer
JunxiongWang committed on
Commit 782e9a9 · 1 parent: dff2e25

Update configs.yaml

Files changed (1):
configs.yaml (+4 −4)
configs.yaml CHANGED
@@ -1,13 +1,13 @@
-llama3_0_875_mamba2_sft_3dataset_ep1:
+Mamba2InLlama_0_875:
   prompt_template: "templates/llama3.txt"
   fn_completions: "huggingface_local_completions"
   completions_kwargs:
-    model_name: "/data/junxiong/sft/dpo/llama3_0_875_mamba2_sft_3dataset_ep1/"
+    model_name: "JunxiongWang/Mamba2InLlama_0_875"
     model_kwargs:
       torch_dtype: 'bfloat16'
     max_new_tokens: 2048
     temperature: 0.7
     top_p: 1.0
     do_sample: True
-  pretty_name: "Mamba 0 5 From Zephyr 7B Beta"
-  link: "https://huggingface.co/HuggingFaceH4/zephyr-7b-beta"
+  pretty_name: "Mamba2 0 875 From meta-llama/Meta-Llama-3-8B-Instruct"
+  link: "https://huggingface.co/JunxiongWang/Mamba2InLlama_0_875"
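For context, the completions_kwargs in this entry are standard Hugging Face generation settings. Below is a minimal, illustrative sketch of how they would map onto a plain transformers generation call; it is not part of this repo. The direct AutoModelForCausalLM load is an assumption (the hybrid Mamba2-in-Llama checkpoint may require the authors' own loading code or the configured huggingface_local_completions path), and the prompt string is made up.

```python
# Illustrative sketch only: mirrors the generation settings from the
# Mamba2InLlama_0_875 entry in configs.yaml. Loading via AutoModelForCausalLM
# is an assumption; the actual checkpoint may need custom loading code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "JunxiongWang/Mamba2InLlama_0_875"  # model_name from the config

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # model_kwargs: torch_dtype: 'bfloat16'
)

prompt = "Explain what a state-space model is in one paragraph."  # hypothetical prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# Sampling settings taken directly from the config:
# max_new_tokens, temperature, top_p, do_sample.
outputs = model.generate(
    **inputs,
    max_new_tokens=2048,
    temperature=0.7,
    top_p=1.0,
    do_sample=True,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```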