git clone https://huggingface.co/Qwen/Qwen-7B
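Hugging Face stores the multi-gigabyte weight files with Git LFS, so if the clone above finishes suspiciously fast, the weights likely came down as small pointer stubs. Enabling LFS and re-pulling fixes this:

```bash
# Fetch the real weight files tracked by Git LFS
# (a plain git clone without LFS leaves pointer stubs behind).
git lfs install
cd Qwen-7B && git lfs pull && cd ..
```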
git clone https://gitee.com/meijunhui0917/LLaMA-Efficient-Tuning.git
"file_name": "huanhuan.json",
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path ../Qwen/Qwen-7B \
--do_train \
--dataset huanhuan \
--template default \
--finetuning_type lora \
--lora_target c_attn \
--output_dir ./model \
--overwrite_cache \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--save_steps 1000 \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--plot_loss \
--fp16
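Once training completes, the adapter written to ./model can be tried out interactively. A minimal sketch, assuming the repository's src/cli_demo.py accepts the same model, template, and LoRA flags as train_bash.py (run it with -h to confirm the flag names in your checkout):

```bash
# Chat with the base model plus the newly trained LoRA adapter;
# --checkpoint_dir is the --output_dir used during training.
CUDA_VISIBLE_DEVICES=0 python src/cli_demo.py \
--model_name_or_path ../Qwen/Qwen-7B \
--template default \
--finetuning_type lora \
--checkpoint_dir ./model
```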
A follow-up run can keep training the same task with prompt and response lengths capped at 512 tokens; the task flags match the first command, and --resume_lora_training True continues updating the existing LoRA weights instead of creating new ones:

CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path ../Qwen/Qwen-7B \
--do_train \
--dataset huanhuan \
--template default \
--finetuning_type lora \
--lora_target c_attn \
--max_source_length 512 \
--max_target_length 512 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 4 \
--lr_scheduler_type cosine \
--resume_lora_training True \
--output_dir saves/Qwen-7B-chat/lora/2023-08-22-17-23-51 \
--fp16
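When the adapter is final, the LoRA weights can be merged back into the base model so it loads as a plain checkpoint. A hedged sketch, assuming src/export_model.py exists in this revision of the repo and follows train_bash.py's flag conventions (check python src/export_model.py -h); ./qwen-7b-huanhuan is a hypothetical output path:

```bash
# Merge the LoRA delta into the base weights and write a
# standalone model directory (hypothetical output path).
python src/export_model.py \
--model_name_or_path ../Qwen/Qwen-7B \
--template default \
--finetuning_type lora \
--checkpoint_dir saves/Qwen-7B-chat/lora/2023-08-22-17-23-51 \
--output_dir ./qwen-7b-huanhuan
```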