iOS online recognition: https://www.jianshu.com/u/3c2a0bd52ebc
The earlier parts had little to do with speech recognition itself; starting with this part we get into recognition proper. Picking up from the previous part, we first walk through decoding, i.e. taking audio as input and producing text as output.
Let's first look at the reference decoding code that Kaldi provides:
int main(int argc, char *argv[]) {
  try {
    using namespace kaldi;
    using namespace fst;

    typedef kaldi::int32 int32;
    typedef kaldi::int64 int64;

    const char *usage =
        "Reads in wav file(s) and simulates online decoding with neural nets\n"
        "(nnet3 setup), with optional iVector-based speaker adaptation and\n"
        "optional endpointing. Note: some configuration values and inputs are\n"
        "set via config files whose filenames are passed as options\n"
        "\n"
        "Usage: online2-wav-nnet3-latgen-faster [options] <nnet3-in> <fst-in> "
        "<spk2utt-rspecifier> <wav-rspecifier> <lattice-wspecifier>\n"
        "The spk2utt-rspecifier can just be <utterance-id> <utterance-id> if\n"
        "you want to decode utterance by utterance.\n";

    ParseOptions po(usage);

    std::string word_syms_rxfilename;

    // feature_opts includes configuration for the iVector adaptation,
    // as well as the basic features.
    OnlineNnet2FeaturePipelineConfig feature_opts;
    nnet3::NnetSimpleLoopedComputationOptions decodable_opts;
    LatticeFasterDecoderConfig decoder_opts;
    OnlineEndpointConfig endpoint_opts;

    BaseFloat chunk_length_secs = 0.18;
    bool do_endpointing = false;
    bool online = true;

    po.Register("chunk-length", &chunk_length_secs,
                "Length of chunk size in seconds, that we process. Set to <= 0 "
                "to use all input in one chunk.");
    po.Register("word-symbol-table", &word_syms_rxfilename,
                "Symbol table for words [for debug output]");
    po.Register("do-endpointing", &do_endpointing,
                "If true, apply endpoint detection");
    po.Register("online", &online,
                "You can set this to false to disable online iVector estimation "
                "and have all the data for each utterance used, even at "
                "utterance start. This is useful where you just want the best "
                "results and don't care about online operation. Setting this to "
                "false has the same effect as setting "
                "--use-most-recent-ivector=true and --greedy-ivector-extractor=true "
                "in the file given to --ivector-extraction-config, and "
                "--chunk-length=-1.");
    po.Register("num-threads-startup", &g_num_threads,
                "Number of threads used when initializing iVector extractor.");

    feature_opts.Register(&po);
    decodable_opts.Register(&po);
    decoder_opts.Register(&po);
    endpoint_opts.Register(&po);

    po.Read(argc, argv);

    if (po.NumArgs() != 5) {
      po.PrintUsage();
      return 1;
    }

    std::string nnet3_rxfilename = po.GetArg(1),
        fst_rxfilename = po.GetArg(2),
        spk2utt_rspecifier = po.GetArg(3),
        wav_rspecifier = po.GetArg(4),
        clat_wspecifier = po.GetArg(5);

    OnlineNnet2FeaturePipelineInfo feature_info(feature_opts);

    if (!online) {
      feature_info.ivector_extractor_info.use_most_recent_ivector = true;
      feature_info.ivector_extractor_info.greedy_ivector_extractor = true;
      chunk_length_secs = -1.0;
    }

    TransitionModel trans_model;
    nnet3::AmNnetSimple am_nnet;
    {
      bool binary;
      Input ki(nnet3_rxfilename, &binary);
      trans_model.Read(ki.Stream(), binary);
      am_nnet.Read(ki.Stream(), binary);
      SetBatchnormTestMode(true, &(am_nnet.GetNnet()));
      SetDropoutTestMode(true, &(am_nnet.GetNnet()));
      nnet3::CollapseModel(nnet3::CollapseModelConfig(), &(am_nnet.GetNnet()));
    }

    // this object contains precomputed stuff that is used by all decodable
    // objects. It takes a pointer to am_nnet because if it has iVectors it has
    // to modify the nnet to accept iVectors at intervals.
    nnet3::DecodableNnetSimpleLoopedInfo decodable_info(decodable_opts,
                                                        &am_nnet);

    fst::Fst<fst::StdArc> *decode_fst = ReadFstKaldiGeneric(fst_rxfilename);

    fst::SymbolTable *word_syms = NULL;
    if (word_syms_rxfilename != "")
      if (!(word_syms = fst::SymbolTable::ReadText(word_syms_rxfilename)))
        KALDI_ERR << "Could not read symbol table from file "
                  << word_syms_rxfilename;

    int32 num_done = 0, num_err = 0;
    double tot_like = 0.0;
    int64 num_frames = 0;

    SequentialTokenVectorReader spk2utt_reader(spk2utt_rspecifier);
    RandomAccessTableReader<WaveHolder> wav_reader(wav_rspecifier);
    CompactLatticeWriter clat_writer(clat_wspecifier);

    OnlineTimingStats timing_stats;

    for (; !spk2utt_reader.Done(); spk2utt_reader.Next()) {
      std::string spk = spk2utt_reader.Key();
      const std::vector<std::string> &uttlist = spk2utt_reader.Value();

      OnlineIvectorExtractorAdaptationState adaptation_state(
          feature_info.ivector_extractor_info);

      for (size_t i = 0; i < uttlist.size(); i++) {
        std::string utt = uttlist[i];
        if (!wav_reader.HasKey(utt)) {
          KALDI_WARN << "Did not find audio for utterance " << utt;
          num_err++;
          continue;
        }
        const WaveData &wave_data = wav_reader.Value(utt);

        // get the data for channel zero (if the signal is not mono, we only
        // take the first channel).
        SubVector<BaseFloat> data(wave_data.Data(), 0);

        OnlineNnet2FeaturePipeline feature_pipeline(feature_info);
        feature_pipeline.SetAdaptationState(adaptation_state);

        OnlineSilenceWeighting silence_weighting(
            trans_model,
            feature_info.silence_weighting_config,
            decodable_opts.frame_subsampling_factor);

        SingleUtteranceNnet3Decoder decoder(decoder_opts, trans_model,
                                            decodable_info,
                                            *decode_fst, &feature_pipeline);

        OnlineTimer decoding_timer(utt);

        BaseFloat samp_freq = wave_data.SampFreq();
        int32 chunk_length;
        if (chunk_length_secs > 0) {
          chunk_length = int32(samp_freq * chunk_length_secs);
          if (chunk_length == 0) chunk_length = 1;
        } else {
          chunk_length = std::numeric_limits<int32>::max();
        }

        int32 samp_offset = 0;
        std::vector<std::pair<int32, BaseFloat> > delta_weights;

        while (samp_offset < data.Dim()) {
          int32 samp_remaining = data.Dim() - samp_offset;
          int32 num_samp = chunk_length < samp_remaining ? chunk_length
                                                         : samp_remaining;

          SubVector<BaseFloat> wave_part(data, samp_offset, num_samp);
          feature_pipeline.AcceptWaveform(samp_freq, wave_part);

          samp_offset += num_samp;
          decoding_timer.WaitUntil(samp_offset / samp_freq);
          if (samp_offset == data.Dim()) {
            // no more input. flush out last frames
            feature_pipeline.InputFinished();
          }

          if (silence_weighting.Active() &&
              feature_pipeline.IvectorFeature() != NULL) {
            silence_weighting.ComputeCurrentTraceback(decoder.Decoder());
            silence_weighting.GetDeltaWeights(feature_pipeline.NumFramesReady(),
                                              &delta_weights);
            feature_pipeline.IvectorFeature()->UpdateFrameWeights(delta_weights);
          }

          decoder.AdvanceDecoding();

          if (do_endpointing && decoder.EndpointDetected(endpoint_opts)) {
            break;
          }
        }
        decoder.FinalizeDecoding();

        CompactLattice clat;
        bool end_of_utterance = true;
        decoder.GetLattice(end_of_utterance, &clat);

        GetDiagnosticsAndPrintOutput(utt, word_syms, clat,
                                     &num_frames, &tot_like);

        decoding_timer.OutputStats(&timing_stats);

        // In an application you might avoid updating the adaptation state if
        // you felt the utterance had low confidence. See lat/confidence.h
        feature_pipeline.GetAdaptationState(&adaptation_state);

        // we want to output the lattice with un-scaled acoustics.
        BaseFloat inv_acoustic_scale =
            1.0 / decodable_opts.acoustic_scale;
        ScaleLattice(AcousticLatticeScale(inv_acoustic_scale), &clat);

        clat_writer.Write(utt, clat);
        KALDI_LOG << "Decoded utterance " << utt;
        num_done++;
      }
    }
    timing_stats.Print(online);

    KALDI_LOG << "Decoded " << num_done << " utterances, "
              << num_err << " with errors.";
    KALDI_LOG << "Overall likelihood per frame was " << (tot_like / num_frames)
              << " per frame over " << num_frames << " frames.";

    delete decode_fst;
    delete word_syms;  // will delete if non-NULL.
    return (num_done != 0 ? 0 : 1);
  } catch(const std::exception& e) {
    std::cerr << e.what();
    return -1;
  }
}  // main()
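Note that main() calls GetDiagnosticsAndPrintOutput(), a helper defined earlier in the same file (online2-wav-nnet3-latgen-faster.cc) but not shown above. It extracts the best path from the lattice, accumulates per-frame likelihood statistics, and prints the recognized words; a sketch close to the Kaldi original:

void GetDiagnosticsAndPrintOutput(const std::string &utt,
                                  const fst::SymbolTable *word_syms,
                                  const CompactLattice &clat,
                                  int64 *tot_num_frames,
                                  double *tot_like) {
  if (clat.NumStates() == 0) {
    KALDI_WARN << "Empty lattice.";
    return;
  }
  // Extract the single best path from the compact lattice.
  CompactLattice best_path_clat;
  CompactLatticeShortestPath(clat, &best_path_clat);
  Lattice best_path_lat;
  ConvertLattice(best_path_clat, &best_path_lat);

  // Read off the alignment, word sequence and total weight.
  LatticeWeight weight;
  std::vector<int32> alignment;
  std::vector<int32> words;
  GetLinearSymbolSequence(best_path_lat, &alignment, &words, &weight);

  int32 num_frames = alignment.size();
  double likelihood = -(weight.Value1() + weight.Value2());
  *tot_num_frames += num_frames;
  *tot_like += likelihood;
  KALDI_VLOG(2) << "Likelihood per frame for utterance " << utt << " is "
                << (likelihood / num_frames) << " over " << num_frames
                << " frames.";

  // Map the word-ids through the symbol table and print the hypothesis.
  if (word_syms != NULL) {
    std::cerr << utt << ' ';
    for (size_t i = 0; i < words.size(); i++) {
      std::string s = word_syms->Find(words[i]);
      if (s == "")
        KALDI_ERR << "Word-id " << words[i] << " not in symbol table.";
      std::cerr << s << ' ';
    }
    std::cerr << std::endl;
  }
}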
And here is our own decoding code:
static void kaldidecoder_decode_segment(Gstkaldidecoder *filter,
                                        bool &more_data,
                                        int32 chunk_length,
                                        BaseFloat traceback_period_secs) {
  OnlineNnet2FeaturePipeline feature_pipeline(*(filter->feature_info));
  feature_pipeline.SetAdaptationState(*(filter->adaptation_state));

  SingleUtteranceNnet3Decoder decoder(*(filter->decoder_opts),
                                      *(filter->trans_model),
                                      *(filter->decodable_info_nnet3),
                                      *(filter->decode_fst),
                                      &feature_pipeline);

  OnlineSilenceWeighting silence_weighting(*(filter->trans_model),
                                           *(filter->silence_weighting_config));

  Vector<BaseFloat> wave_part(chunk_length);
  std::vector<std::pair<int32, BaseFloat> > delta_weights;

  GST_DEBUG_OBJECT(filter, "Reading audio in %d sample chunks...",
                   wave_part.Dim());

  BaseFloat last_traceback = 0.0;
  BaseFloat num_seconds_decoded = 0.0;
  while (true) {
    more_data = filter->audio_source->Read(&wave_part);

    feature_pipeline.AcceptWaveform(filter->sample_rate, wave_part);
    if (!more_data) {
      feature_pipeline.InputFinished();
    }

    if (silence_weighting.Active() &&
        feature_pipeline.IvectorFeature() != NULL) {
      silence_weighting.ComputeCurrentTraceback(decoder.Decoder());
      silence_weighting.GetDeltaWeights(
          feature_pipeline.IvectorFeature()->NumFramesReady(),
          &delta_weights);
      feature_pipeline.IvectorFeature()->UpdateFrameWeights(delta_weights);
    }

    decoder.AdvanceDecoding();
    GST_DEBUG_OBJECT(filter, "%d frames decoded", decoder.NumFramesDecoded());

    num_seconds_decoded += 1.0 * wave_part.Dim() / filter->sample_rate;
    filter->total_time_decoded += 1.0 * wave_part.Dim() / filter->sample_rate;
    GST_DEBUG_OBJECT(filter, "Total amount of audio processed: %f seconds",
                     filter->total_time_decoded);

    if (!more_data) {
      break;
    }
    if (filter->do_endpointing
        && (decoder.NumFramesDecoded() > 0)
        && decoder.EndpointDetected(*(filter->endpoint_config))) {
      GST_DEBUG_OBJECT(filter, "Endpoint detected!");
      break;
    }
    // Periodically emit a partial recognition result from the current
    // best path.
    if ((num_seconds_decoded - last_traceback > traceback_period_secs)
        && (decoder.NumFramesDecoded() > 0)) {
      Lattice lat;
      decoder.GetBestPath(false, &lat);
      kaldidecoder_partial_result(filter, lat);
      last_traceback += traceback_period_secs;
    }
  }

  if (num_seconds_decoded > 0.1) {
    GST_DEBUG_OBJECT(filter, "Getting lattice..");
    decoder.FinalizeDecoding();
    CompactLattice clat;
    bool end_of_utterance = true;
    decoder.GetLattice(end_of_utterance, &clat);
    GST_DEBUG_OBJECT(filter, "Lattice done");
    if ((filter->lm_fst != NULL) && (filter->big_lm_const_arpa != NULL)) {
      GST_DEBUG_OBJECT(filter, "Rescoring lattice with a big LM");
      CompactLattice rescored_lat;
      if (kaldidecoder_rescore_big_lm(filter, clat, rescored_lat)) {
        clat = rescored_lat;
      }
    }
    guint num_words = 0;
    kaldidecoder_final_result(filter, clat, &num_words);
    if (num_words >= filter->min_words_for_ivector) {
      // Only update adaptation state if the utterance contained enough words
      feature_pipeline.GetAdaptationState(filter->adaptation_state);
    }
  } else {
    GST_DEBUG_OBJECT(filter, "Less than 0.1 seconds decoded, discarding");
  }
}
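The helpers kaldidecoder_partial_result, kaldidecoder_final_result and kaldidecoder_rescore_big_lm are our own functions and are not shown here. For orientation, here is a minimal hypothetical sketch of what a partial-result helper can do with the best-path lattice; the word_syms field on the filter is an assumption, while GetLinearSymbolSequence() and SymbolTable::Find() are real Kaldi/OpenFst API:

// Hypothetical sketch: convert a best-path lattice to a word string and
// log it. filter->word_syms is an assumed field holding the word symbol
// table loaded at startup.
static void kaldidecoder_partial_result(Gstkaldidecoder *filter,
                                        const Lattice &lat) {
  std::vector<int32> alignment, words;
  LatticeWeight weight;
  // Read the linear word-id sequence off the single-path lattice.
  GetLinearSymbolSequence(lat, &alignment, &words, &weight);
  std::string sentence;
  for (size_t i = 0; i < words.size(); i++) {
    if (i > 0) sentence += " ";
    sentence += filter->word_syms->Find(words[i]);  // word-id -> text
  }
  GST_DEBUG_OBJECT(filter, "Partial result: %s", sentence.c_str());
  // A real implementation would emit this string to the application
  // (e.g. as a GStreamer signal or bus message) rather than only log it.
}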
Decoding will be explained in detail in later parts.