add finetune script

This commit is contained in:
pengzhendong
2025-12-24 13:49:09 +08:00
parent 2b79dffe3e
commit 78619da40c
5 changed files with 116 additions and 10 deletions

1
data/train_example.jsonl Normal file
View File

@@ -0,0 +1 @@
{"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "语音转写:<|startofspeech|>!/path/to/wav<|endofspeech|>"}, {"role": "assistant", "content": "content of /path/to/wav"}], "speech_length": 42, "text_length": 42}

1
data/val_example.jsonl Normal file
View File

@@ -0,0 +1 @@
{"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "语音转写:<|startofspeech|>!/path/to/wav<|endofspeech|>"}, {"role": "assistant", "content": "content of /path/to/wav"}], "speech_length": 42, "text_length": 42}

View File

@@ -0,0 +1,33 @@
{
"train_micro_batch_size_per_gpu": 1,
"gradient_accumulation_steps": 1,
"steps_per_print": 100,
"gradient_clipping": 5,
"fp16": {
"enabled": false,
"auto_cast": false,
"loss_scale": 0,
"initial_scale_power": 16,
"loss_scale_window": 1000,
"hysteresis": 2,
"consecutive_hysteresis": false,
"min_loss_scale": 1
},
"bf16": {
"enabled": true
},
"zero_force_ds_cpu_optimizer": false,
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "none",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 5e8,
"contiguous_gradients": true
}
}

80
finetune.sh Normal file
View File

@@ -0,0 +1,80 @@
#!/usr/bin/env bash
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
#
# Fine-tune Fun-ASR-Nano with the funasr trainer (train_ds.py), launched via torchrun.
# Inputs : data/train_example.jsonl, data/val_example.jsonl (relative to the CWD)
# Outputs: checkpoints and the training log under ./outputs (log: ./outputs/log.txt)
# Env    : WORLD_SIZE / RANK / MASTER_ADDR / MASTER_PORT may be provided by a
#          cluster scheduler; sane single-node defaults are used otherwise.
set -euo pipefail

workspace=$(pwd)

# Which GPU(s) to train or finetune on; gpu_num is the number of entries
# in the comma-separated CUDA_VISIBLE_DEVICES list.
export CUDA_VISIBLE_DEVICES="0"
gpu_num=$(awk -F ',' '{print NF}' <<<"${CUDA_VISIBLE_DEVICES}")

# model_name from model_hub, or model_dir in local path
## option 1, download model automatically
model_name_or_model_dir="FunAudioLLM/Fun-ASR-Nano-2512"
## option 2, download model by git
#local_path_root=${workspace}/modelscope_models
#mkdir -p "${local_path_root}/${model_name_or_model_dir}"
#git clone "https://www.modelscope.cn/${model_name_or_model_dir}.git" "${local_path_root}/${model_name_or_model_dir}"
#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}

# data dir, which contains: train.json, val.json
train_data=${workspace}/data/train_example.jsonl
val_data=${workspace}/data/val_example.jsonl

# exp output dir
output_dir="./outputs"
log_file="${output_dir}/log.txt"
deepspeed_config=${workspace}/deepspeed_conf/ds_stage1.json
mkdir -p "${output_dir}"
echo "log_file: ${log_file}"

# torchrun launcher arguments, kept in an array so each flag/value survives
# quoting intact (a flat string would rely on unquoted word-splitting).
distributed_args=(
  --nnodes "${WORLD_SIZE:-1}"
  --nproc_per_node "${gpu_num}"
  --node_rank "${RANK:-0}"
  --master_addr "${MASTER_ADDR:-127.0.0.1}"
  --master_port "${MASTER_PORT:-26669}"
)
echo "${distributed_args[@]}"

# Locate the funasr trainer (train_ds.py): first next to the funasr entry
# point, then inside the installed funasr package. Fail early if the funasr
# CLI itself is missing, instead of letting dirname act on an empty path.
funasr_bin=$(command -v funasr) || {
  echo "Error: funasr not found in PATH." >&2
  exit 1
}
funasr_dir=$(dirname "${funasr_bin}")
train_tool=""
# The python* glob may expand to several versions; take the first that exists
# (the original single [ -f glob ] test breaks when the glob matches >1 path).
for candidate in \
  "${funasr_dir}"/train_ds.py \
  "${funasr_dir}"/../lib/python*/site-packages/funasr/bin/train_ds.py; do
  if [ -f "${candidate}" ]; then
    train_tool=${candidate}
    break
  fi
done
if [ -z "${train_tool}" ]; then
  echo "Error: train_ds.py not found in funasr bin directory." >&2
  exit 1
fi
# Resolve to an absolute path so torchrun workers find it regardless of CWD.
train_tool=$(cd "$(dirname "${train_tool}")" && pwd)/train_ds.py
echo "Using funasr trainer: ${train_tool}"

torchrun "${distributed_args[@]}" \
  "${train_tool}" \
  ++model="${model_name_or_model_dir}" \
  ++trust_remote_code=true \
  ++train_data_set_list="${train_data}" \
  ++valid_data_set_list="${val_data}" \
  ++dataset_conf.data_split_num=1 \
  ++dataset_conf.batch_sampler="BatchSampler" \
  ++dataset_conf.batch_size=6000 \
  ++dataset_conf.sort_size=1024 \
  ++dataset_conf.batch_type="token" \
  ++dataset_conf.num_workers=4 \
  ++train_conf.max_epoch=50 \
  ++train_conf.log_interval=1 \
  ++train_conf.resume=true \
  ++train_conf.validate_interval=2000 \
  ++train_conf.save_checkpoint_interval=2000 \
  ++train_conf.keep_nbest_models=20 \
  ++train_conf.avg_nbest_model=10 \
  ++train_conf.use_deepspeed=false \
  ++train_conf.deepspeed_config="${deepspeed_config}" \
  ++optim_conf.lr=0.0002 \
  ++output_dir="${output_dir}" &> "${log_file}"

View File

@@ -125,7 +125,6 @@ class FunASRNano(nn.Module):
self.use_low_frame_rate = audio_adaptor_conf.get("use_low_frame_rate", False)
self.length_normalized_loss = length_normalized_loss
self.feat_permute = audio_encoder_conf.get("feat_permute", True)
rank = int(os.environ.get("RANK", 0))
logging.info(f"rank: {rank}, model is builded.")
@@ -255,12 +254,7 @@ class FunASRNano(nn.Module):
def encode(self, speech, speech_lengths):
# audio encoder
if self.feat_permute:
encoder_out, encoder_out_lens = self.audio_encoder(
speech.permute(0, 2, 1), speech_lengths
)
else:
encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
return encoder_out, encoder_out_lens
@@ -385,9 +379,6 @@ class FunASRNano(nn.Module):
/ 1000
)
if self.feat_permute:
speech = speech.permute(0, 2, 1)
if self.use_low_frame_rate:
olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
olens = 1 + (olens - 3 + 2 * 1) // 2