1. Training command
CUDA_VISIBLE_DEVICES=0 nohup swift sft \
    --model_id_or_path "qwen/Qwen1.5-4B-Chat" \
    --template_type "qwen" \
    --system "You are a helpful assistant." \
    --custom_train_dataset_path /home/qwen/data/train.json \
    --dataset_test_ratio "0.1" \
    --max_length "1024" \
    --train_dataset_sample "2000" \
    --save_steps "50" \
    --lora_target_modules q_proj k_proj v_proj \
    --learning_rate "1e-4" \
    --num_train_epochs "30" \
    --gradient_accumulation_steps "16" \
    --eval_batch_size "1" \
    --add_output_dir_suffix False \
    --output_dir /home/qwen/output/qwen1half-4b-chat/shenlichaming \
    --logging_dir /home/qwen/output/qwen1half-4b-chat/shenlichaming/runs \
    > /home/qwen/output/qwen1half-4b-chat/shenlichaming/runs/run.log 2>&1 &
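# The file passed to --custom_train_dataset_path is a plain JSON training set. A minimal sketch of one
# query/response format commonly accepted for swift custom datasets follows; the sample pairs are purely
# hypothetical, so verify the exact schema against your swift version.
cat > /home/qwen/data/train.json <<'EOF'
[
  {"query": "What does a solid red indicator light mean?", "response": "A solid red light usually signals a fault; check the power and network connections first."},
  {"query": "How do I restart the device?", "response": "Hold the power button for five seconds and wait for the indicator to turn green."}
]
EOF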
2. Merge and quantize the model
# (1) Merge the LoRA weights and quantize to 4-bit with AWQ
CUDA_VISIBLE_DEVICES=0 swift export --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350 --quant_bits 4 --quant_method awq --merge_lora true
# (2) Same as (1), but use the fine-tuning dataset as the calibration dataset for quantization
CUDA_VISIBLE_DEVICES=0 swift export --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350 --merge_lora true --quant_bits 4 --load_dataset_config true --quant_method awq
CUDA_VISIBLE_DEVICES=0 swift infer --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350-merged-awq-int4 --infer_backend pt  # extremely slow with the pt backend; not usable in practice
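# Since the pt backend is too slow for the AWQ int4 checkpoint, a vLLM-backed variant of the same command may
# be faster. This is an untested sketch and assumes the installed vLLM build supports AWQ int4 for this model.
CUDA_VISIBLE_DEVICES=0 swift infer --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350-merged-awq-int4 --infer_backend vllm --max_model_len 512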
3. Run the model
# Merge and launch the web UI directly. If the checkpoint has not been merged yet, a *-merged directory is created; once merged, the model in the merged directory can be run directly.
CUDA_VISIBLE_DEVICES=0 swift app-ui --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350 --infer_backend vllm --max_model_len 512 --merge_lora true
CUDA_VISIBLE_DEVICES=-1 swift app-ui --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350-merged --infer_backend pt --max_model_len 512 --server_name 0.0.0.0  # CPU-only (CUDA_VISIBLE_DEVICES=-1)
# Run in the console
CUDA_VISIBLE_DEVICES=0 swift infer --model_type qwen1half-4b-chat --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350-merged --infer_backend pt
# Run as an API service
CUDA_VISIBLE_DEVICES=0 swift deploy --ckpt_dir /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350-merged --infer_backend pt --max_model_len 512 --port 7861 --host 0.0.0.0
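# swift deploy exposes an OpenAI-compatible API. A minimal sketch of a test request: the model name
# qwen1half-4b-chat is an assumption (it should match the deployed model_type), and the prompt is hypothetical.
curl http://localhost:7861/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "qwen1half-4b-chat", "messages": [{"role": "user", "content": "Hello, who are you?"}], "max_tokens": 256}'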
4. Convert to GGUF
# Convert the merged checkpoint to an FP16 GGUF with llama.cpp's convert-hf-to-gguf.py, then quantize it to q5_k_m
python convert-hf-to-gguf.py /home/qwen/output/qwen1half-4b-chat/v0-20240319-100101/checkpoint-350-merged --outfile /home/qwen/output/qwen1half-4b-chat/qwen1_5-4b-chat-fp16.gguf
./quantize /home/qwen/output/qwen1half-4b-chat/qwen1_5-4b-chat-fp16.gguf /home/qwen/output/qwen1half-4b-chat/qwen1_5-4b-chat-q5_k_m.gguf q5_k_m
# Start inference on the GGUF model
./main -m /home/qwen/output/qwen1half-4b-chat/qwen1_5-4b-chat-q5_k_m.gguf -n 128
./server -m /home/qwen/output/qwen1half-4b-chat/qwen1_5-4b-chat-q5_k_m-350.gguf --host 0.0.0.0 --port 7861
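# Once ./server is up, it can be exercised over HTTP. A minimal sketch of a request to llama.cpp's /completion
# endpoint; the prompt text is hypothetical.
curl http://localhost:7861/completion \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Hello, who are you?", "n_predict": 128}'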
python3 server.py --model-dir /home/qwen/output/qwen1half-4b-chat/qwen1_5-4b-chat-q5_k_m-350.gguf --listen --listen-host 0.0.0.0 --listen-port 7862 --api