Forked from carlthome/Signal reconstruction from spectrograms.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "96d082df610362652bbccd304fd5083b",
"version": "0.3.2",
"provenance": [],
"private_outputs": true,
"collapsed_sections": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"[View in Colaboratory](https://colab.research.google.com/gist/mdda/96d082df610362652bbccd304fd5083b/notebook.ipynb)"
]
},
{
"metadata": {
"id": "1YBnbhGuh9Eg",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
"# Signal reconstruction from spectrograms\n", | |
"Reconstruct waveform from input spectrogram by iteratively minimizing a cost function between the spectrogram and white noise transformed into the exact same time-frequency domain.\n", | |
"\n", | |
"Assuming 50% magnitude overlap and linearly spaced frequencies this reconstruction method is pretty much lossless in terms of audio quality, which is nice in those cases where phase information cannot be recovered.\n", | |
"\n", | |
"Given a filtered spectrogram such as with a Mel filterbank, the resulting audio is noticeably degraded (particularly due to lost treble) but still decent.\n", | |
"\n", | |
"The biggest downside with this method is that the iterative procedure is very slow (running on a GPU is a good idea for any audio tracks longer than 20 seconds) compared to just having an inverse transform at hand.\n", | |
"\n", | |
"## Reference\n", | |
"- Decorsière, Rémi, et al. \"Inversion of auditory spectrograms, traditional spectrograms, and other envelope representations.\" IEEE/ACM Transactions on Audio, Speech and Language Processing (TASLP) 23.1 (2015): 46-56." | |
] | |
}, | |
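{
"metadata": {},
"cell_type": "markdown",
"source": [
"Concretely, the cost that `sonify` (defined below) minimizes can be sketched as follows: with $S$ the target log-scaled spectrogram, $T(\\cdot)$ the spectrogram transform, and $x$ a waveform variable initialized to low-amplitude white noise, the loss is roughly the mean squared error between the $\\ell_2$-normalized, un-logged spectrograms,\n",
"\n",
"$$L(x) = \\mathrm{MSE}\\big(\\mathrm{normalize}(e^{T(x)} - 1),\\ \\mathrm{normalize}(e^{S} - 1)\\big),$$\n",
"\n",
"minimized over the samples of $x$ with L-BFGS."
]
},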
{
"metadata": {
"id": "6-A4wfExnt83",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# librosa decodes compressed audio through audioread, which needs a backend such as ffmpeg.\n",
"!apt-get -qq -y install ffmpeg\n",
"# !apt-get -qq -y install gstreamer"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "ZjqSl2lTlZgV",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"try:\n",
"    import librosa\n",
"except ImportError:\n",
"    ! pip install librosa\n",
"    import librosa"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "dE1gj-K2lZUJ",
"colab_type": "text"
},
"cell_type": "markdown",
"source": [
""
]
},
{
"metadata": {
"id": "s3IuWUf2h9Eh",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"# Overview diagram of the reconstruction pipeline (expects diagram.png in the working directory).\n",
"from IPython.display import Image\n",
"Image('diagram.png')"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "0zUhOnUQh9Em",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"import tensorflow as tf\n",
"\n",
"\n",
"def sonify(spectrogram, samples, transform_op_fn, logscaled=True):\n",
"    graph = tf.Graph()\n",
"    with graph.as_default():\n",
"\n",
"        # Start from (very quiet) white noise; these samples are the variables being optimized.\n",
"        noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))\n",
"\n",
"        x = transform_op_fn(noise)\n",
"        y = spectrogram\n",
"\n",
"        # Undo the log scaling before comparing, if the transform is log-scaled.\n",
"        if logscaled:\n",
"            x = tf.expm1(x)\n",
"            y = tf.expm1(y)\n",
"\n",
"        x = tf.nn.l2_normalize(x)\n",
"        y = tf.nn.l2_normalize(y)\n",
"        tf.losses.mean_squared_error(x, y)\n",
"\n",
"        # Minimize the spectrogram mismatch over the noise samples with L-BFGS.\n",
"        optimizer = tf.contrib.opt.ScipyOptimizerInterface(\n",
"            loss=tf.losses.get_total_loss(),\n",
"            var_list=[noise],\n",
"            tol=1e-16,\n",
"            method='L-BFGS-B',\n",
"            options={\n",
"                'maxiter': 1000,\n",
"                'disp': True\n",
"            })\n",
"\n",
"    with tf.Session(graph=graph) as session:\n",
"        session.run(tf.global_variables_initializer())\n",
"        optimizer.minimize(session)\n",
"        waveform = session.run(noise)\n",
"\n",
"    return waveform"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "mWKAShW5h9Eo",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"import librosa as lr\n",
"\n",
"#sample_rate = 44100\n",
"sample_rate = 22050\n",
"\n",
"#path = lr.util.example_audio_file()\n",
"from google.colab import files\n",
"\n",
"uploaded = files.upload()\n",
"\n",
"for fn in uploaded.keys():\n",
"    print('User uploaded file \"{name}\" with length {length} bytes'.format(\n",
"        name=fn, length=len(uploaded[fn])))\n",
"\n",
"    # Decode to a mono float waveform, resampled to sample_rate.\n",
"    waveform = lr.load(fn, sr=sample_rate)[0]  # duration=3.0\n",
"\n"
],
"execution_count": 0,
"outputs": []
},
{
"metadata": {
"id": "-DdDrnqAmoxK",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"def logmel(waveform):\n",
"    # STFT with 2048-sample frames and a 1024-sample hop (50% overlap).\n",
"    z = tf.contrib.signal.stft(waveform, 2048, 1024)\n",
"    magnitudes = tf.abs(z)\n",
"    # Project the linear-frequency magnitudes onto an 80-band mel filterbank (0-8000 Hz).\n",
"    filterbank = tf.contrib.signal.linear_to_mel_weight_matrix(\n",
"        num_mel_bins=80,\n",
"        num_spectrogram_bins=magnitudes.shape[-1].value,\n",
"        sample_rate=sample_rate,\n",
"        lower_edge_hertz=0.0,\n",
"        upper_edge_hertz=8000.0)\n",
"    melspectrogram = tf.tensordot(magnitudes, filterbank, 1)\n",
"    return tf.log1p(melspectrogram)\n",
"\n",
"\n",
"# Compute the target log-mel spectrogram once, then reconstruct a waveform that matches it.\n",
"with tf.Session():\n",
"    spectrogram = logmel(waveform).eval()\n",
"\n",
"reconstructed_waveform = sonify(spectrogram, len(waveform), logmel)"
],
"execution_count": 0,
"outputs": []
},
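{
"metadata": {},
"cell_type": "markdown",
"source": [
"As a quick visual sanity check, the target log-mel spectrogram can be compared with the log-mel spectrogram of the reconstructed waveform. This is a minimal matplotlib sketch (axes are frame and mel-bin indices rather than seconds and hertz); `reconstructed_spectrogram` is simply a name introduced here for the second plot."
]
},
{
"metadata": {},
"cell_type": "code",
"source": [
"# Visual comparison: target log-mel spectrogram vs. the reconstruction's log-mel spectrogram.\n",
"import matplotlib.pyplot as plt\n",
"\n",
"with tf.Session():\n",
"    reconstructed_spectrogram = logmel(reconstructed_waveform).eval()\n",
"\n",
"fig, axes = plt.subplots(2, 1, figsize=(12, 6), sharex=True)\n",
"axes[0].imshow(spectrogram.T, aspect='auto', origin='lower')\n",
"axes[0].set_title('Target log-mel spectrogram')\n",
"axes[1].imshow(reconstructed_spectrogram.T, aspect='auto', origin='lower')\n",
"axes[1].set_title('Reconstruction log-mel spectrogram')\n",
"plt.tight_layout()\n",
"plt.show()"
],
"execution_count": 0,
"outputs": []
},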
{
"metadata": {
"id": "P4KPtAdfh9Er",
"colab_type": "code",
"colab": {}
},
"cell_type": "code",
"source": [
"from IPython.display import display, Audio\n",
"\n",
"# Listen to the original first, then the reconstruction.\n",
"display(Audio(waveform, rate=sample_rate))\n",
"display(Audio(reconstructed_waveform, rate=sample_rate))"
],
"execution_count": 0,
"outputs": []
}
]
}