short buf[N]; /* input/output buffer */
float Sn[M]; /* float input speech samples */
COMP Sw[FFT_ENC]; /* DFT of Sn[] */
+ kiss_fft_cfg fft_enc_cfg;
float w[M]; /* time domain hamming window */
COMP W[FFT_ENC]; /* DFT of w[] */
MODEL model;
/* Initialise ------------------------------------------------------------*/
- make_analysis_window(w,W);
+ fft_enc_cfg = kiss_fft_alloc(FFT_ENC, 1, NULL, NULL);
+ make_analysis_window(fft_enc_cfg, w,W);
make_synthesis_window(Pn);
quantise_init();
nlp(nlp_states,Sn,N,M,P_MIN,P_MAX,&pitch,Sw,&prev_uq_Wo);
model.Wo = TWO_PI/pitch;
- dft_speech(Sw, Sn, w);
+ dft_speech(fft_enc_cfg, Sw, Sn, w);
two_stage_pitch_refinement(&model, Sw);
estimate_amplitudes(&model, Sw, W);
uq_Wo = model.Wo;
c2->hpf_states[0] = c2->hpf_states[1] = 0.0;
for(i=0; i<2*N; i++)
c2->Sn_[i] = 0;
- make_analysis_window(c2->w,c2->W);
+ c2->fft_enc_cfg = kiss_fft_alloc(FFT_ENC, 1, NULL, NULL);
+ make_analysis_window(c2->fft_enc_cfg, c2->w,c2->W);
make_synthesis_window(c2->Pn);
quantise_init();
c2->prev_Wo_enc = 0.0;
{
assert(c2 != NULL);
nlp_destroy(c2->nlp);
+ KISS_FFT_FREE(c2->fft_enc_cfg);
free(c2);
}
for(i=0; i<N; i++)
c2->Sn[i+M-N] = speech[i];
- dft_speech(Sw, c2->Sn, c2->w);
+ dft_speech(c2->fft_enc_cfg, Sw, c2->Sn, c2->w);
/* Estimate pitch */
#define __CODEC2_INTERNAL__
struct CODEC2 {
- int mode;
- float w[M]; /* time domain hamming window */
- COMP W[FFT_ENC]; /* DFT of w[] */
- float Pn[2*N]; /* trapezoidal synthesis window */
- float Sn[M]; /* input speech */
- float hpf_states[2]; /* high pass filter states */
- void *nlp; /* pitch predictor states */
- float Sn_[2*N]; /* synthesised output speech */
- float ex_phase; /* excitation model phase track */
- float bg_est; /* background noise estimate for post filter */
- float prev_Wo_enc; /* previous frame's pitch estimate */
- MODEL prev_model_dec; /* previous frame's model parameters */
- float prev_lsps_dec[LPC_ORD]; /* previous frame's LSPs */
- float prev_e_dec; /* previous frame's LPC energy */
-
- float xq_enc[2]; /* joint pitch and energy VQ states */
- float xq_dec[2];
+ int mode;
+ kiss_fft_cfg fft_enc_cfg; /* FFT config for encoder */
+ float w[M]; /* time domain hamming window */
+ COMP W[FFT_ENC]; /* DFT of w[] */
+ float Pn[2*N]; /* trapezoidal synthesis window */
+ float Sn[M]; /* input speech */
+ float hpf_states[2]; /* high pass filter states */
+ void *nlp; /* pitch predictor states */
+
+ float Sn_[2*N]; /* synthesised output speech */
+ float ex_phase; /* excitation model phase track */
+ float bg_est; /* background noise estimate for post filter */
+ float prev_Wo_enc; /* previous frame's pitch estimate */
+ MODEL prev_model_dec; /* previous frame's model parameters */
+ float prev_lsps_dec[LPC_ORD]; /* previous frame's LSPs */
+ float prev_e_dec; /* previous frame's LPC energy */
+
+ float xq_enc[2]; /* joint pitch and energy VQ states */
+ float xq_dec[2];
};
#endif
The output can be used to plot a spectrum of the demod input.
Successive calls can be used to build up a waterfall or spectrogram
- plot, but mapping the levels to colours.
+ plot, by mapping the received levels to colours.
+
+ The time-frequency resolution of the FFT can be adjusted by varying
+ FDMDV_NFFT. Note that an FFT of size 2*FDMDV_NFFT is required to get
+ FDMDV_NFFT output points.
\*---------------------------------------------------------------------------*/
void CODEC2_WIN32SUPPORT fdmdv_get_fft(struct FDMDV *f, float mag_dB[], float rx_fdm[], int nin)
{
int i,j;
- COMP F[2*FDMDV_NFFT];
+ COMP fft_io[2*FDMDV_NFFT];
float fullscale_dB;
+#ifdef TT
/* update buffer of input samples */
for(i=0; i<2*FDMDV_NFFT-nin; i++)
f->fft_buf[i] = f->fft_buf[i+nin];
for(j=0; j<nin; j++,i++)
f->fft_buf[i] = rx_fdm[j];
+ assert(i == 2*FDMDV_NFFT);
/* window and FFT */
for(i=0; i<2*FDMDV_NFFT; i++) {
- F[i].real = f->fft_buf[i] * (0.5 - 0.5*cos((float)i*2.0*PI/FDMDV_NFFT));
- F[i].imag = 0.0;
+ fft_io[i].real = f->fft_buf[i] * (0.5 - 0.5*cos((float)i*2.0*PI/(2*FDMDV_NFFT)));
+ fft_io[i].imag = 0.0;
+ }
+#endif
+ for(i=0; i<2*FDMDV_NFFT; i++) {
+ fft_io[i].real = 1.0;
+ fft_io[i].imag = 0.0;
}
- fft(&F[0].real, 2*FDMDV_NFFT, -1);
+ fft(&fft_io[0].real, 2*FDMDV_NFFT, -1);
+ printf("%d fft_io[%d] %f %f\n", FDMDV_NFFT,0, fft_io[0].real, fft_io[0].imag);
+
+#ifdef TMP
/* scale and convert to dB */
- fullscale_dB = 20*log10(FDMDV_NFFT*32767.0);
+ fullscale_dB = 20.0*log10(FDMDV_NFFT*32767.0);
+
for(i=0; i<FDMDV_NFFT; i++) {
- mag_dB[i] = 10*log10(F[i].real*F[i].real + F[i].imag*F[i].imag);
+ mag_dB[i] = 10.0*log10(F[i].real*F[i].real + F[i].imag*F[i].imag);
mag_dB[i] -= fullscale_dB;
}
+#endif
+ for(i=0; i<FDMDV_NFFT; i++)
+ mag_dB[i] = 1;
}
int f;
FILE *foct = NULL;
struct FDMDV_STATS stats;
- float *rx_fdm_log;
+ float *rx_fdm_log;
int rx_fdm_log_col_index;
COMP rx_symbols_log[FDMDV_NSYM][MAX_FRAMES];
int coarse_fine_log[MAX_FRAMES];
int sync_bit_log[MAX_FRAMES];
int rx_bits_log[FDMDV_BITS_PER_FRAME*MAX_FRAMES];
float snr_est_log[MAX_FRAMES];
-
+ float *fft_log;
+
if (argc < 3) {
printf("usage: %s InputModemRawFile OutputBitFile [OctaveDumpFile]\n", argv[0]);
printf("e.g %s hts1a_fdmdv.raw hts1a.c2\n", argv[0]);
exit(1);
}
- /* this cause out of stack probs on windows as a regular variable
- so let malloc it */
+ /* malloc some of the bigger variables to prevent out of stack problems */
rx_fdm_log = (float*)malloc(sizeof(float)*FDMDV_MAX_SAMPLES_PER_FRAME*MAX_FRAMES);
assert(rx_fdm_log != NULL);
+ fft_log = (float*)malloc(sizeof(float)*FDMDV_NFFT*MAX_FRAMES);
+ assert(fft_log != NULL);
fdmdv = fdmdv_create();
f = 0;
sync_bit_log[f] = sync_bit;
memcpy(&rx_bits_log[FDMDV_BITS_PER_FRAME*f], rx_bits, sizeof(int)*FDMDV_BITS_PER_FRAME);
snr_est_log[f] = stats.snr_est;
+
+ fdmdv_get_fft(fdmdv, &fft_log[f*FDMDV_NFFT], rx_fdm, nin_prev);
+
f++;
}
else
octave_save_int(foct, "rx_bits_log_c", rx_bits_log, 1, FDMDV_BITS_PER_FRAME*f);
octave_save_int(foct, "sync_bit_log_c", sync_bit_log, 1, f);
octave_save_float(foct, "snr_est_log_c", snr_est_log, 1, f, MAX_FRAMES);
+ //octave_save_float(foct, "fft_log_c", fft_log, f, FDMDV_NFFT, FDMDV_NFFT);
fclose(foct);
}
}
fclose(fin);
fclose(fout);
free(rx_fdm_log);
+ free(fft_log);
fdmdv_destroy(fdmdv);
return 0;
\*---------------------------------------------------------------------------*/
+#ifdef BRUCE
+
+/* Efficient but runs into problems if we have two different size FFTs
+ in the same program - see notes below */
+
void
fft (float x[], int n, int isign)
{
x[c + 1] = -fout[(c) / 2].i;
}
}
+#endif
+
+/* This version is not as efficient but can handle different size FFTs in
+ the same program. This is required in fdmdv and also if we link fdmdv and
+ codec 2 into the same program. If CPU load becomes an issue we could always
+ modify it to allocate the FFT cfg states at start up.
+
+ Or maybe we should just bite the bullet and modify all fft() calls
+ to match the kiss_fft calling conventions. This would mean
+ allocating states for each fft at the start of the program which is
+ no biggie.
+
+*/
+
+#define DAVID
+#ifdef DAVID
+void
+fft (float x[], int n, int isign)
+{
+ int c;
+ kiss_fft_cfg cfg;
+ kiss_fft_cpx *input, *output;
+
+ input = KISS_FFT_MALLOC (n * sizeof (kiss_fft_cpx));
+ assert(input != NULL);
+ output = KISS_FFT_MALLOC (n * sizeof (kiss_fft_cpx));
+ assert(output != NULL);
+
+ for (c = 0; c < n * 2; c += 2) {
+ input[c / 2].r = x[c];
+ input[c / 2].i = -x[c + 1];
+ }
+ if (isign == -1)
+ cfg = kiss_fft_alloc (n, 1, NULL, NULL);
+ else
+ cfg = kiss_fft_alloc (n, 0, NULL, NULL);
+ kiss_fft (cfg, input, output);
+ for (c = 0; c < n * 2; c += 2) {
+ x[c] = output[(c) / 2].r;
+ x[c + 1] = -output[(c) / 2].i;
+ }
+ KISS_FFT_FREE(input);
+ KISS_FFT_FREE(output);
+ KISS_FFT_FREE(cfg);
+}
+#endif
void cleanup_fft(void)
{
#ifdef DUMP
dump_dec(Fw);
#endif
- kiss_fft (nlp->fft_cfg, (kiss_fft_cpx *)fw, (kiss_fft_cpx *)Fw);
+ kiss_fft(nlp->fft_cfg, (kiss_fft_cpx *)fw, (kiss_fft_cpx *)Fw);
//fft(&Fw[0].real,PE_FFT_SIZE,1);
for(i=0; i<PE_FFT_SIZE; i++)
Fw[i].real = Fw[i].real*Fw[i].real + Fw[i].imag*Fw[i].imag;
#include "defines.h"
#include "sine.h"
-#include "fft.h"
+#include "kiss_fft.h"
#define HPF_BETA 0.125
\*---------------------------------------------------------------------------*/
-void make_analysis_window(float w[],COMP W[])
+void make_analysis_window(kiss_fft_cfg fft_enc_cfg, float w[], COMP W[])
{
float m;
+ COMP wshift[FFT_ENC];
COMP temp;
int i,j;
*/
for(i=0; i<FFT_ENC; i++) {
- W[i].real = 0.0;
- W[i].imag = 0.0;
+ wshift[i].real = 0.0;
+ wshift[i].imag = 0.0;
}
for(i=0; i<NW/2; i++)
- W[i].real = w[i+M/2];
+ wshift[i].real = w[i+M/2];
for(i=FFT_ENC-NW/2,j=M/2-NW/2; i<FFT_ENC; i++,j++)
- W[i].real = w[j];
+ wshift[i].real = w[j];
- fft(&W[0].real,FFT_ENC,-1); /* "Numerical Recipes in C" FFT */
+ kiss_fft(fft_enc_cfg, (kiss_fft_cpx *)wshift, (kiss_fft_cpx *)W);
+
+ // fft(&W[0].real,FFT_ENC,-1); /* "Numerical Recipes in C" FFT */
/*
Re-arrange W[] to be symmetrical about FFT_ENC/2. Makes later
\*---------------------------------------------------------------------------*/
-void dft_speech(COMP Sw[], float Sn[], float w[])
+void dft_speech(kiss_fft_cfg fft_enc_cfg, COMP Sw[], float Sn[], float w[])
{
int i;
#include "defines.h"
#include "comp.h"
+#include "kiss_fft.h"
-void make_analysis_window(float w[], COMP W[]);
+void make_analysis_window(kiss_fft_cfg fft_enc_cfg, float w[], COMP W[]);
float hpf(float x, float states[]);
-void dft_speech(COMP Sw[], float Sn[], float w[]);
+void dft_speech(kiss_fft_cfg fft_enc_cfg, COMP Sw[], float Sn[], float w[]);
void two_stage_pitch_refinement(MODEL *model, COMP Sw[]);
void estimate_amplitudes(MODEL *model, COMP Sw[], COMP W[]);
float est_voicing_mbe(MODEL *model, COMP Sw[], COMP W[], COMP Sw_[],COMP Ew[],