diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f350c74
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+# ignore Cordova/PhoneGap CLI directories
+# in essence, everything except the *.xdk file and the www directory
+/*
+!/www
+!/docs
+!/demo
+!/src
+!/config.xml
+!/README.md
+!/LICENSE.md
+!/LICENSE
+!/package.json
+!/.gitignore
+!/plugin.xml
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..61c1228
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Exelerus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e17c91e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,157 @@
+# cordova-plugin-audioinput
+
+Cordova plugin that provides real-time audio data capture from the device's microphone.
+It can be used by apps that apply effects to the microphone input using, for example, the HTML5 Web Audio API.
+
+Since `navigator.getUserMedia()` isn't supported by all browsers, this plugin provides similar functionality by forwarding raw audio data to the HTML5 app using continuous callbacks.
+
+It adds the following `window` events:
+
+* audioinput
+* audioinputerror
+
+## Installation
+
+```
+cordova plugin add cordova-plugin-audioinput
+```
+
+or
+
+```
+cordova plugin add https://github.com/edimuj/cordova-plugin-audioinput.git
+```
+
+## Supported Platforms
+
+* Android
+* iOS
+
+## Basic Usage Example
+
+```javascript
+
+// Start with default values; the plugin handles the conversion from raw data to web audio, and no audioinput events will be dispatched.
+audioinput.start({
+    streamToWebAudio: true
+});
+
+// Connect the audioinput to the device speakers in order to hear the captured sound. If an audio context is not provided, the plugin will create one for you.
+audioinput.connect(audioinput.getAudioContext().destination);
+
+// Remember that this creates an audio feedback loop, so lower the volume!
+
+```
+
+## Advanced Usage Example - Events
+
+Use this event-based method if you want more control over the capture process.
+
+Define a callback function to subscribe to `audioinput` events.
+The callback function is called continuously during capture, allowing your application to receive chunks of raw audio data.
+You can also subscribe to `audioinputerror` error events, as seen in the example below.
+
+```javascript
+function onAudioInput(evt) {
+
+    // 'evt.data' is an array containing normalized audio data
+    // (floats in the range -1.0 to +1.0 with the default settings).
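+    // A quick illustration (not part of the original example): the chunk's RMS
+    // level could be computed from the normalized samples like this:
+    //   var sum = 0;
+    //   for (var i = 0; i < evt.data.length; i++) { sum += evt.data[i] * evt.data[i]; }
+    //   var rms = Math.sqrt(sum / evt.data.length);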
+    //
+    console.log("Audio data received: " + evt.data.length + " samples");
+
+    // ... do something with the evt.data array ...
+}
+
+// Listen to audioinput events.
+window.addEventListener("audioinput", onAudioInput, false);
+
+var onAudioInputError = function(error) {
+    alert("onAudioInputError event received: " + error);
+};
+
+// Listen to audioinputerror events.
+window.addEventListener("audioinputerror", onAudioInputError, false);
+
+```
+
+After the Cordova `deviceready` event has fired:
+
+```javascript
+var captureCfg = {
+    sampleRate: 44100,
+    bufferSize: 8192,
+    channels: 1,
+    format: 'PCM_16BIT'
+};
+
+// Start capturing audio from the microphone
+audioinput.start(captureCfg);
+
+// Stop capturing audio input
+audioinput.stop();
+```
+
+## Demo
+
+The `demo` folder contains examples showing both basic and advanced usage, where the captured microphone audio data is used to play back the audio to the device speaker using the Web Audio API.
+
+## API
+
+Start capturing audio from the microphone:
+
+```javascript
+audioinput.start( captureCfg );
+```
+
+where `captureCfg` can contain any of the following parameters (please note that not all audio configurations are supported by all devices):
+
+```javascript
+var captureCfg = {
+    sampleRate: 44100,            // The sample rate in Hz. Default: 44100.
+    bufferSize: 8192,             // Maximum size in bytes of the capture buffer. Default: 4096.
+    channels: 1,                  // The number of channels to use: mono (1) or stereo (2). Default: 1.
+    format: 'PCM_16BIT',          // The audio format. Currently PCM_16BIT and PCM_8BIT are supported. Default: 'PCM_16BIT'.
+    normalize: true,              // Specifies whether the audio data should be normalized. Default: true.
+    normalizationFactor: 32767.0, // The factor to use when normalization is performed. Default: 32767.0.
+    streamToWebAudio: false,      // If true, the plugin handles the conversion of the data to web audio, and the audioinput object can then act as an AudioNode that can be connected to your web audio node chain. Default: false.
+    audioContext: null,           // Used in conjunction with streamToWebAudio. If no audioContext is given, one will be created by the plugin.
+    concatenateMaxChunks: 10      // How many chunks will be merged each time; a low value means lower latency but requires more CPU resources. Default: 10.
+};
+```
+
+Stop capturing audio from the microphone:
+
+```javascript
+audioinput.stop();
+```
+
+Check whether the audioinput plugin is currently capturing (i.e. whether it has been started):
+
+```javascript
+audioinput.isCapturing();
+```
+
+Get the current configuration from the audioinput plugin:
+
+```javascript
+audioinput.getCfg();
+```
+
+When using `streamToWebAudio` you can connect the audioinput plugin to your web audio node chain:
+
+```javascript
+audioinput.connect( audioNode );
+```
+
+When using `streamToWebAudio` you can disconnect the previously connected audioinput plugin from your web audio node chain:
+
+```javascript
+audioinput.disconnect();
+```
+
+When using `streamToWebAudio` without supplying your own audio context, the following method returns the audio context that the plugin created internally:
+
+```javascript
+audioinput.getAudioContext();
+```
+
+## Credits
+
+The plugin is created and maintained by Edin Mujkanovic.
\ No newline at end of file
diff --git a/demo/advanceddemo.html b/demo/advanceddemo.html
new file mode 100644
index 0000000..77fcbda
--- /dev/null
+++ b/demo/advanceddemo.html
@@ -0,0 +1,45 @@
+ + + + Cordova Plugin AudioInput Demo + + + + + + + + +
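+ <!-- The form controls below are wired up by initUIEvents() in advanceddemo.js; element ids assumed from that script: sampleRate, bufferSize, startCapture, stopCapture, infoMessage, infoTimer -->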
+

Cordova AudioInput Plugin
+ Advanced Demo

+
+
Not ready
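+ <!-- Status line: replaced by consoleMessage() in utils.js (assumes this element has id "infoMessage") -->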
+
+
+ + Hz +
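+ <!-- Sample rate in Hz (default 44100); per the README, not all rates are supported by all devices -->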
+ + bytes +
+ + Mono + Stereo +
+ + PCM 8BIT + PCM 16BIT
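+ <!-- Audio format: PCM_16BIT is the default; PCM_8BIT is also supported (see README) -->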
+
+ Start Capture + Stop Capture +
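+ <!-- Click handlers for these buttons are attached in initUIEvents() in advanceddemo.js (assumed ids: startCapture, stopCapture) -->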
+
+ Open the Basic Demo +
+ + + + +
diff --git a/demo/advanceddemo.js b/demo/advanceddemo.js
new file mode 100644
index 0000000..73a0b1f
--- /dev/null
+++ b/demo/advanceddemo.js
@@ -0,0 +1,257 @@
+// Web Audio API
+var audioContext, micGainNode;
+
+// Capture configuration object
+var captureCfg = {};
+
+// Queue
+var audioDataQueue = [],
+    capturing = false;
+
+// How many data chunks should be joined before playing them
+var concatenateMaxChunks = 10;
+
+// Timers
+var timerGetNextAudio, timerInterval, timerGenerateSimulatedData;
+
+// Info/Debug
+var totalReceivedData = 0,
+    totalPlayedData = 0;
+
+
+/**
+ * Called continuously while AudioInput capture is running.
+ *
+ * @param evt The audioinput event; evt.data is a float array containing normalized audio data (-1.0 to +1.0).
+ */
+function onAudioInputCapture(evt) {
+    try {
+        if (evt && evt.data) {
+            // Increase the debug counter for received data
+            totalReceivedData += evt.data.length;
+
+            // Push the data to the audio queue (array)
+            audioDataQueue.push(evt.data);
+        }
+        else {
+            alert("Unknown audioinput event!");
+        }
+    }
+    catch (ex) {
+        alert("onAudioInputCapture ex: " + ex);
+    }
+}
+
+
+/**
+ * Called when a capture error occurs.
+ *
+ * @param error
+ */
+function onAudioInputError(error) {
+    alert("onAudioInputError event received: " + error);
+}
+
+
+/**
+ * Consumes data from the audioinput queue and calls the playAudio method
+ */
+var getNextToPlay = function () {
+    var duration = 50;
+
+    // Check if there is any data in the queue
+    if (audioDataQueue.length > 0) {
+
+        // Concatenate up to concatenateMaxChunks data arrays from the queue
+        var concatenatedData = [];
+        for (var i = 0; i < concatenateMaxChunks; i++) {
+            if (audioDataQueue.length === 0) {
+                break;
+            }
+            concatenatedData = concatenatedData.concat(audioDataQueue.shift());
+        }
+
+        // Play the audio
+        duration = playAudio(concatenatedData) * 1000;
+    }
+
+    // Still capturing? Then call myself to continue consuming incoming data.
+    if (capturing) {
+        timerGetNextAudio = setTimeout(getNextToPlay, duration);
+    }
+};
+
+
+/**
+ * Play audio using the Web Audio API
+ */
+var playAudio = function (data) {
+
+    // Create an audio buffer to hold the data
+    var audioBuffer = audioContext.createBuffer(captureCfg.channels, (data.length / captureCfg.channels),
+        captureCfg.sampleRate);
+
+    // Initialize the audio buffer with the data
+    if (captureCfg.channels > 1) {
+        // For multiple channels (stereo) we assume that the data is interleaved
+        for (var i = 0; i < captureCfg.channels; i++) {
+            var chdata = [],
+                index = 0;
+
+            while (index < data.length) {
+                chdata.push(data[index + i]);
+                index += parseInt(captureCfg.channels);
+            }
+
+            audioBuffer.getChannelData(i).set(chdata);
+        }
+    }
+    else {
+        // For a single channel (mono)
+        audioBuffer.getChannelData(0).set(data);
+    }
+
+    // Create a buffer source based on the audio buffer
+    var source = audioContext.createBufferSource();
+    source.buffer = audioBuffer;
+
+    // Connect the buffer source to the gain node
+    source.connect(micGainNode);
+
+    // Play the audio immediately
+    source.start(0);
+
+    // Increase the debug counter for played data
+    totalPlayedData += data.length;
+
+    // Return the duration of the sound so that we can play the next sound when it ends.
+    return audioBuffer.duration;
+};
+
+
+/**
+ * Creates the Web Audio Context and audio nodes for output.
+ * Returns true on success, or false if the Web Audio API is unavailable.
+ */
+var initWebAudio = function () {
+    try {
+        window.AudioContext = window.AudioContext || window.webkitAudioContext;
+        audioContext = new AudioContext();
+        consoleMessage("Web Audio Context is ready");
+    }
+    catch (e) {
+        consoleMessage('Web Audio API is not supported in this browser: ' + e);
+        return false;
+    }
+
+    // Create a gain node for volume control
+    micGainNode = audioContext.createGain();
+
+    // Connect the gain node to the speaker
+    micGainNode.connect(audioContext.destination);
+
+    return true;
+};
+
+
+/**
+ * Initializes the events for the start and stop buttons.
+ */
+var initUIEvents = function () {
+
+    // Start Audio capture
+    //
+    document.getElementById("startCapture").addEventListener("click", function () {
+        capturing = true;
+
+        // Get the audio capture configuration from the UI elements
+        //
+        captureCfg = {
+            sampleRate: parseInt(document.getElementById('sampleRate').value),
+            bufferSize: parseInt(document.getElementById('bufferSize').value),
+            channels: parseInt(document.querySelector('input[name="channels"]:checked').value),
+            format: document.querySelector('input[name="format"]:checked').value
+        };
+
+        if (isMobile.any() && window.audioinput) {
+            audioinput.start(captureCfg);
+            consoleMessage("Microphone input started!");
+        }
+        else {
+            // todo: Add navigator.getUserMedia() instead?
+
+            // On desktop we instead generate some audio input data
+            generateSimulatedAudioInput(captureCfg.bufferSize);
+            //timerGenerateSimulatedData = setInterval(generateSimulatedAudioInput, 100);
+
+            consoleMessage("Simulated input started (desktop)!");
+        }
+
+        // Start the interval that outputs time and debug data while capturing
+        //
+        timerInterval = setInterval(function () {
+            if (capturing) {
+                document.getElementById("infoTimer").innerHTML = "" +
+                    new Date().toTimeString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, "$1") +
+                    "|Received:" + totalReceivedData + "|Played:" + totalPlayedData;
+            }
+        }, 1000);
+
+
+        // Start the audio queue consumer
+        //
+        getNextToPlay();
+    });
+
+    // Stop Audio capture and reset everything
+    //
+    document.getElementById("stopCapture").addEventListener("click", function () {
+
+        capturing = false;
+        clearInterval(timerInterval);
+
+        if (isMobile.any() && window.audioinput) {
+            audioinput.stop();
+        }
+        else {
+            clearInterval(timerGenerateSimulatedData);
+        }
+
+        audioDataQueue = [];
+        totalReceivedData = 0;
+        totalPlayedData = 0;
+
+        document.getElementById("infoTimer").innerHTML = "";
+        consoleMessage("Stopped");
+    });
+};
+
+
+/**
+ * When Cordova fires the deviceready event, we initialize everything needed for audio input.
+ */
+var onDeviceReady = function () {
+
+    initUIEvents();
+
+    if (initWebAudio()) {
+        consoleMessage("Use 'Start Capture' to begin...");
+    }
+
+    // Subscribe to audioinput events
+    //
+    window.addEventListener('audioinput', onAudioInputCapture, false);
+    window.addEventListener('audioinputerror', onAudioInputError, false);
+};
+
+if (!window.cordova) {
+    // Make it possible to run the demo on desktop
+    console.log("Running on desktop!");
+    onDeviceReady();
+}
+else {
+    // For Cordova apps
+    document.addEventListener('deviceready', onDeviceReady, false);
+}
diff --git a/demo/basicdemo.html b/demo/basicdemo.html
new file mode 100644
index 0000000..914d6c3
--- /dev/null
+++ b/demo/basicdemo.html
@@ -0,0 +1,31 @@
+ + + + Cordova Plugin AudioInput Demo + + + + + + + + +
+

Cordova AudioInput Plugin
+ Basic Demo

+
Not ready
+
+
+ Start Capture + Stop Capture +
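+ <!-- Click handlers are attached in initUIEvents() in basicdemo.js, which calls startCapture()/stopCapture() -->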
+
+ Open the Advanced Demo +
+ + + + + +
diff --git a/demo/basicdemo.js b/demo/basicdemo.js
new file mode 100644
index 0000000..430da0d
--- /dev/null
+++ b/demo/basicdemo.js
@@ -0,0 +1,73 @@
+/**
+ * Initializes the events for the start and stop buttons.
+ */
+var initUIEvents = function () {
+    document.getElementById("startCapture").addEventListener("click", function () {
+        startCapture();
+    });
+
+    document.getElementById("stopCapture").addEventListener("click", function () {
+        stopCapture();
+    });
+};
+
+
+/**
+ * Start Audio capture
+ */
+var startCapture = function () {
+    try {
+        if (window.audioinput) {
+
+            // Start with default values and let the plugin handle conversion from raw data to web audio
+            audioinput.start({
+                streamToWebAudio: true
+            });
+
+            // Connect the audioinput to the speaker(s) in order to hear the captured sound
+            audioinput.connect(audioinput.getAudioContext().destination);
+
+            consoleMessage("Capturing audio!");
+            //alert("Microphone input started with the following configuration: " + JSON.stringify(window.audioinput.getCfg()))
+        }
+        else {
+            consoleMessage("audioinput plugin is not available!");
+        }
+    }
+    catch(ex) {
+        alert("startCapture exception: " + ex);
+    }
+};
+
+
+/**
+ * Stop Audio capture
+ */
+var stopCapture = function () {
+    if (window.audioinput) {
+        audioinput.stop();
+    }
+
+    consoleMessage("Stopped!");
+};
+
+
+/**
+ * When Cordova fires the deviceready event, we initialize everything needed for audio input.
+ */
+var onDeviceReady = function () {
+
+    initUIEvents();
+
+    consoleMessage("Use 'Start Capture' to begin...");
+};
+
+// Make it possible to run the demo on desktop
+if (!window.cordova) {
+    console.log("Running on desktop!");
+    onDeviceReady();
+}
+else {
+    // For Cordova apps
+    document.addEventListener('deviceready', onDeviceReady, false);
+}
diff --git a/demo/index.html b/demo/index.html
new file mode 100644
index 0000000..77fcbda
--- /dev/null
+++ b/demo/index.html
@@ -0,0 +1,45 @@
+ + + + Cordova Plugin AudioInput Demo + + + + + + + + +
+

Cordova AudioInput Plugin
+ Advanced Demo

+
+
Not ready
+
+
+ + Hz +
+ + bytes +
+ + Mono + Stereo +
+ + PCM 8BIT + PCM 16BIT
+
+ Start Capture + Stop Capture +
+
+ Open the Basic Demo +
+ + + + +
diff --git a/demo/style.css b/demo/style.css
new file mode 100644
index 0000000..2fc4256
--- /dev/null
+++ b/demo/style.css
@@ -0,0 +1,15 @@
+/* Disable text selection for all elements except input elements */
+* {
+    -webkit-user-select: none;
+    -khtml-user-select: none;
+    -moz-user-select: -moz-none;
+    -o-user-select: none;
+    user-select: none;
+}
+input {
+    -webkit-user-select: text;
+    -khtml-user-select: text;
+    -moz-user-select: text;
+    -o-user-select: text;
+    user-select: text;
+}
diff --git a/demo/utils.js b/demo/utils.js
new file mode 100644
index 0000000..1ad6cd6
--- /dev/null
+++ b/demo/utils.js
@@ -0,0 +1,53 @@
+// Determines which platform the demo runs on
+var isMobile = {
+    Android: function () {
+        return /Android/i.test(navigator.userAgent);
+    },
+    BlackBerry: function () {
+        return /BlackBerry/i.test(navigator.userAgent);
+    },
+    iOS: function () {
+        return /iPhone|iPad|iPod/i.test(navigator.userAgent);
+    },
+    any: function () {
+        return (isMobile.Android() || isMobile.BlackBerry() || isMobile.iOS());
+    }
+};
+
+
+/**
+ * Debug output messages
+ *
+ * @param msg The message to show
+ */
+var consoleMessage = function (msg) {
+    console.log(msg);
+    document.getElementById("infoMessage").innerHTML = msg;
+};
+
+
+/**
+ * Generates some random audio input data and dispatches an audioinput event for each buffer
+ *
+ * @param bufferSize
+ * @param numberOfIterations
+ */
+var generateSimulatedAudioInput = function (bufferSize, numberOfIterations) {
+
+    var bufSize = bufferSize || 16000,
+        iterations = numberOfIterations || 100;
+
+    for (var i = 0; i < iterations; i++) {
+        var data = [];
+
+        for (var k = 0; k < bufSize; k++) {
+            // Note: these simulated samples span roughly -50..+50, i.e. they are not
+            // normalized to [-1.0, 1.0] like real capture data, so playback will clip.
+            data.push((Math.random() * 100.0) - 50.0);
+        }
+
+        // Dispatch an event
+        var event = new CustomEvent('audioinput');
+        event.data = data;
+        window.dispatchEvent(event);
+    }
+};
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..0e7d855
--- /dev/null
+++ b/package.json
@@ -0,0 +1,25 @@
+{
+  "name": "cordova-plugin-audioinput",
+  "version": "0.0.1",
+  "description": "This plugin enables audio capture from the device's microphone, by forwarding raw audio data using callbacks.",
+  "cordova": {
+    "id": "cordova-plugin-audioinput",
+    "platforms": [
+      "android",
+      "ios"
+    ]
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/edimuj/cordova-plugin-audioinput.git"
+  },
+  "keywords": [
+    "ecosystem:cordova",
+    "cordova-android",
+    "cordova-ios"
+  ],
+  "author": "Edin Mujkanovic",
+  "license": "MIT",
+  "bugs": {
+    "url": "https://github.com/edimuj/cordova-plugin-audioinput/issues"
+  },
+  "homepage": "https://github.com/edimuj/cordova-plugin-audioinput#readme"
+}
\ No newline at end of file
diff --git a/plugin.xml b/plugin.xml
new file mode 100644
index 0000000..3e25c9d
--- /dev/null
+++ b/plugin.xml
@@ -0,0 +1,59 @@
+ + + Audio Input + Cordova plugin which provides real-time audio data capture from the device's microphone.
+ Edin Mujkanovic + MIT + cordova,media,microphone,mic,input,audio + https://github.com/edimuj/cordova-plugin-audioinput.git + https://github.com/edimuj/cordova-plugin-audioinput/issues + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
diff --git a/src/android/AudioInputCapture.java b/src/android/AudioInputCapture.java
new file mode 100644
index 0000000..fc6b8c8
--- /dev/null
+++ b/src/android/AudioInputCapture.java
@@ -0,0 +1,170 @@
+package com.exelerus.cordova.audioinputcapture;
+
+import org.apache.cordova.CordovaPlugin;
+import org.apache.cordova.CallbackContext;
+import org.apache.cordova.PluginResult;
+
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import java.lang.ref.WeakReference;
+
+import android.os.Handler;
+import android.os.Message;
+import android.util.Log;
+import android.content.pm.PackageManager;
+import org.apache.cordova.PermissionHelper;
+import android.Manifest;
+
+
+public class AudioInputCapture extends CordovaPlugin
+{
+    private static final String LOG_TAG = "AudioInputCapture";
+
+    private CallbackContext callbackContext = null;
+
+    private AudioInputReceiver receiver;
+
+    private final AudioInputCaptureHandler handler = new AudioInputCaptureHandler(this);
+
+    public static String[] permissions = { Manifest.permission.RECORD_AUDIO };
+    public static int RECORD_AUDIO = 0;
+    public static final int PERMISSION_DENIED_ERROR = 20;
+
+    private int sampleRate = 44100;
+    private int bufferSize = 4096;
+    private int channels = 1;
+    private String format = null;
+
+    @Override
+    public boolean execute(String action, JSONArray args, CallbackContext callbackContext) throws JSONException {
+        if (action.equals("start")) {
+            if (this.callbackContext != null) {
+                callbackContext.error("AudioInputCapture listener already running.");
+                return true;
+            }
+            this.callbackContext = callbackContext;
+
+            try {
+                // cfg.sampleRate, cfg.bufferSize, cfg.channels, cfg.format
+                this.sampleRate = args.getInt(0);
+                this.bufferSize = args.getInt(1);
+                this.channels = args.getInt(2);
+                this.format = args.getString(3);
+
+                promptForRecord();
+            } catch (Exception e) {
+                e.printStackTrace();
+                // Guard against a NullPointerException when the receiver was never started
+                if (receiver != null) {
+                    receiver.interrupt();
+                }
+            }
+
+            // Don't return any result now, since status results will be sent when events come in from broadcast receiver
+            PluginResult pluginResult = new PluginResult(PluginResult.Status.NO_RESULT);
+            pluginResult.setKeepCallback(true);
+            callbackContext.sendPluginResult(pluginResult);
+            return true;
+        }
+
+        else if (action.equals("stop")) {
+            if (receiver != null) {
+                receiver.interrupt();
+            }
+            this.sendUpdate(new JSONObject(), false); // release status callback in JS side
+            this.callbackContext = null;
+            callbackContext.success();
+            return true;
+        }
+
+        return false;
+    }
+
+    public void onDestroy() {
+        if (receiver != null && !receiver.isInterrupted()) {
+            receiver.interrupt();
+        }
+    }
+
+    public void onReset() {
+        if (receiver != null && !receiver.isInterrupted()) {
+            receiver.interrupt();
+        }
+    }
+
+    /**
+     * Create a new plugin result and send it back to JavaScript
+     */
+    private void sendUpdate(JSONObject info, boolean keepCallback) {
+        if (this.callbackContext != null) {
+            PluginResult result = new PluginResult(PluginResult.Status.OK, info);
+            result.setKeepCallback(keepCallback);
+            this.callbackContext.sendPluginResult(result);
+        }
+    }
+
+    private static class AudioInputCaptureHandler extends Handler {
+        private final WeakReference<AudioInputCapture> mActivity;
+
+        public AudioInputCaptureHandler(AudioInputCapture activity) {
+            mActivity = new WeakReference<AudioInputCapture>(activity);
+        }
+
+        @Override
+        public void handleMessage(Message msg) {
+            AudioInputCapture activity = mActivity.get();
+            if (activity != null) {
+                JSONObject info = new JSONObject();
+                try {
+                    info.put("data", msg.getData().getString("data"));
+                }
+                catch (JSONException e) {
+                    Log.e(LOG_TAG, e.getMessage(), e);
+                }
+                activity.sendUpdate(info, true);
+            }
+        }
+    }
+
+    /**
+     * Prompt user for record audio permission
+     */
+    protected void getMicPermission(int requestCode)
+    {
+        PermissionHelper.requestPermission(this, requestCode, permissions[RECORD_AUDIO]);
+    }
+
+    /**
+     * Ensure that we have gotten record audio permission
+     */
+    private void promptForRecord()
+    {
+        if(PermissionHelper.hasPermission(this, permissions[RECORD_AUDIO])) {
+            receiver = new AudioInputReceiver(this.sampleRate, this.bufferSize, this.channels, this.format);
+            receiver.setHandler(handler);
+            receiver.start();
+        }
+        else
+        {
+            getMicPermission(RECORD_AUDIO);
+        }
+    }
+
+    /**
+     * Handle request permission result
+     */
+    public void onRequestPermissionResult(int requestCode, String[] permissions,
+                                          int[] grantResults) throws JSONException
+    {
+        for(int r:grantResults)
+        {
+            if(r == PackageManager.PERMISSION_DENIED)
+            {
+                this.callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR,
+                        PERMISSION_DENIED_ERROR));
+                return;
+            }
+        }
+
+        promptForRecord();
+    }
+}
diff --git a/src/android/AudioInputReceiver.java b/src/android/AudioInputReceiver.java
new file mode 100644
index 0000000..1306812
--- /dev/null
+++ b/src/android/AudioInputReceiver.java
@@ -0,0 +1,109 @@
+package com.exelerus.cordova.audioinputcapture;
+
+import android.media.AudioFormat;
+import android.media.AudioRecord;
+import android.media.MediaRecorder;
+
+import android.os.Bundle;
+import android.os.Handler;
+import android.os.Message;
+import java.util.Arrays;
+
+import android.util.Base64;
+
+public class AudioInputReceiver extends Thread {
+
+    private int channelConfig = AudioFormat.CHANNEL_IN_MONO;
+
+    private int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
+
+    private int sampleRateInHz = 44100;
+
+    private int bufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
+
+    private AudioRecord recorder;
+
+    private Handler handler;
+
+    private Message message;
+
+    private Bundle messageBundle = new Bundle();
+
+    public AudioInputReceiver() {
+        recorder = new AudioRecord(MediaRecorder.AudioSource.VOICE_RECOGNITION, sampleRateInHz, channelConfig, audioFormat, bufferSize);
+    }
+
+    public AudioInputReceiver(int sampleRate, int bufferSizeInBytes, int channels, String format) {
+
+        sampleRateInHz = sampleRate;
+
+        if (bufferSizeInBytes > bufferSize) {
+            bufferSize = bufferSizeInBytes;
+        }
+
+        switch (channels) {
+            case 2:
+                channelConfig = AudioFormat.CHANNEL_IN_STEREO;
+                break;
+            case 1:
+            default:
+                channelConfig = AudioFormat.CHANNEL_IN_MONO;
+                break;
+        }
+
+        // Use equals() for the string comparison; == only compares references.
+        if ("PCM_8BIT".equals(format)) {
+            audioFormat = AudioFormat.ENCODING_PCM_8BIT;
+        }
+        else {
+            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
+        }
+
+        recorder = new AudioRecord(MediaRecorder.AudioSource.VOICE_RECOGNITION, sampleRateInHz, channelConfig, audioFormat, bufferSize);
+    }
+
+    public void setHandler(Handler handler) {
+        this.handler = handler;
+    }
+
+    @Override
+    public void run() {
+        int numReadSamples = 0;
+        short[] audioBuffer = new short[bufferSize];
+
+        synchronized(this)
+        {
+            recorder.startRecording();
+
+            while (!isInterrupted()) {
+                // read() fills a short[] buffer and returns the number of shorts read
+                numReadSamples = recorder.read(audioBuffer, 0, bufferSize);
+
+                if (numReadSamples > 0) {
+
+                    try {
+                        // Serialize the samples as a string; Base64 encoding was an alternative:
+                        //String serialized = Base64.encodeToString(audioBuffer, Base64.NO_WRAP);
+                        String serialized = Arrays.toString(audioBuffer);
+
+                        // send data to handler
+                        message = handler.obtainMessage();
+                        messageBundle.putString("data", serialized);
+                        message.setData(messageBundle);
+                        handler.sendMessage(message);
+                    }
+                    catch(Exception ex) {
+                        message = handler.obtainMessage();
+                        messageBundle.putString("error", ex.toString());
+                        message.setData(messageBundle);
+                        handler.sendMessage(message);
+                    }
+                }
+            }
+
+            if (recorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
+                recorder.stop();
+            }
+
+            recorder.release();
+            recorder = null;
+        }
+    }
+}
diff --git a/src/ios/AudioReceiver.h b/src/ios/AudioReceiver.h
new file mode 100644
index 0000000..4929979
--- /dev/null
+++ b/src/ios/AudioReceiver.h
@@ -0,0 +1,57 @@
+//
+//  AudioReceiver.h
+//  AudioReceiver
+//
+//  Created by Edin Mujkanovic on 2016-02-06.
+//
+//
+
+
+#import <Foundation/Foundation.h>
+#import <AudioToolbox/AudioToolbox.h>
+#include <AudioToolbox/AudioQueue.h>
+#include <CoreAudio/CoreAudioTypes.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#define kNumberBuffers 10
+
+typedef struct {
+    __unsafe_unretained id mSelf;
+    AudioStreamBasicDescription mDataFormat;
+    AudioQueueRef mQueue;
+    AudioQueueBufferRef mBuffers[kNumberBuffers];
+    UInt32 bufferByteSize;
+    SInt64 mCurrentPacket;
+    bool mIsRunning;
+} AQRecordState;
+
+
+@interface AudioReceiver : NSObject
+
+@property (nonatomic, assign) id delegate;
+
+@property (nonatomic, assign) AQRecordState recordState;
+
+@property (nonatomic) int mySampleRate;
+@property (nonatomic) int myBufferSize;
+@property (nonatomic) short myChannels;
+@property (nonatomic) short myBitRate;
+@property (nonatomic) NSString* myFormat;
+
+- (void)start;
+- (void)stop;
+- (void)pause;
+- (void)dealloc;
+- (AudioReceiver*)init:(int)sampleRate bufferSize:(int)bufferSizeInBytes noOfChannels:(short)channels audioFormat:(NSString*)format;
+- (void)didReceiveAudioData:(short*)samples dataLength:(int)length;
+- (void)hasError:(int)statusCode:(char*)file:(int)line;
+
+@end
+
+
+@protocol AudioReceiverProtocol
+
+- (void)didReceiveAudioData:(short*)data dataLength:(int)length;
+
+@end
\ No newline at end of file
diff --git a/src/ios/AudioReceiver.m b/src/ios/AudioReceiver.m
new file mode 100644
index 0000000..01d54c5
--- /dev/null
+++ b/src/ios/AudioReceiver.m
@@ -0,0 +1,184 @@
+//
+//  AudioReceiver.m
+//  AudioReceiver
+//
+//  Created by Edin Mujkanovic on 2016-02-06.
+//
+//
+
+#import "AudioReceiver.h"
+
+
+/**
+ Audio Input callback
+ */
+void HandleInputBuffer(void* inUserData,
+                       AudioQueueRef inAQ,
+                       AudioQueueBufferRef inBuffer,
+                       const AudioTimeStamp* inStartTime,
+                       UInt32 inNumPackets,
+                       const AudioStreamPacketDescription* inPacketDesc) {
+
+    AQRecordState* pRecordState = (AQRecordState *)inUserData;
+
+    if (inNumPackets == 0 && pRecordState->mDataFormat.mBytesPerPacket != 0) {
+        inNumPackets = inBuffer->mAudioDataByteSize / pRecordState->mDataFormat.mBytesPerPacket;
+    }
+
+    if (!pRecordState->mIsRunning) {
+        return;
+    }
+
+    long sampleStart = pRecordState->mCurrentPacket;
+    long sampleEnd = pRecordState->mCurrentPacket + inBuffer->mAudioDataByteSize / pRecordState->mDataFormat.mBytesPerPacket - 1;
+
+    short* samples = (short*)inBuffer->mAudioData;
+    long nsamples = sampleEnd - sampleStart + 1;
+
+    pRecordState->mCurrentPacket += inNumPackets;
+
+    // Re-enqueue the buffer so the queue can keep filling it
+    AudioQueueEnqueueBuffer(pRecordState->mQueue, inBuffer, 0, NULL);
+
+    [pRecordState->mSelf didReceiveAudioData:samples dataLength:(int)nsamples];
+
+    /*if (pRecordState->mIsRunning) {
+        [(AMRecorder *)pRecordState->mSelf stop];
+    }*/
+}
+
+
+/**
+ AudioReceiver class implementation
+ */
+@implementation AudioReceiver
+
+@synthesize mySampleRate, myBufferSize, myChannels, myBitRate, myFormat, delegate;
+
+/**
+ Init instance
+ */
+- (AudioReceiver*)init:(int)sampleRate bufferSize:(int)bufferSizeInBytes noOfChannels:(short)channels audioFormat:(NSString*)format
+{
+    static const int maxBufferSize = 0x10000;
+
+    self = [super init];
+
+    if (self) {
+        AudioSessionInitialize(NULL,
+                               NULL,
+                               nil,
+                               (__bridge void*) self);
+
+        UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
+        AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
+                                sizeof(sessionCategory),
+                                &sessionCategory);
+        AudioSessionSetActive(true);
+
+        int bitRate = 16;
+
+        if ([format isEqualToString:@"PCM_8BIT"]) {
+            bitRate = 8;
+        }
+        else {
+            bitRate = 16;
+        }
+
+        // NOTE: sample rate, bit depth and channel count are currently hard-coded;
+        // the requested values are kept in the trailing comments below.
+        _recordState.mDataFormat.mFormatID = kAudioFormatLinearPCM;
+        _recordState.mDataFormat.mSampleRate = 44100.00; //(float)sampleRate;
+        _recordState.mDataFormat.mBitsPerChannel = 16; //bitRate;
+        _recordState.mDataFormat.mChannelsPerFrame = 1; //channels;
+        _recordState.mDataFormat.mFramesPerPacket = 1;
+        _recordState.mDataFormat.mBytesPerFrame = _recordState.mDataFormat.mBytesPerPacket = _recordState.mDataFormat.mChannelsPerFrame * sizeof(SInt16);
+        _recordState.mDataFormat.mReserved = 0;
+        _recordState.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+        // kLinearPCMFormatFlagIsNonInterleaved |
+        _recordState.bufferByteSize = (UInt32) MIN(bufferSizeInBytes, maxBufferSize);
+    }
+
+    return self;
+}
+
+
+/**
+ Start Audio Input capture
+ */
+- (void)start {
+    OSStatus status = noErr;
+
+    _recordState.mCurrentPacket = 0;
+    _recordState.mSelf = self;
+
+    status = AudioQueueNewInput(&_recordState.mDataFormat,
+                                HandleInputBuffer,
+                                &_recordState,
+                                CFRunLoopGetCurrent(),
+                                kCFRunLoopCommonModes,
+                                0,
+                                &_recordState.mQueue);
+    [self hasError:status:__FILE__:__LINE__];
+
+    for (int i = 0; i < kNumberBuffers; i++) {
+        status = AudioQueueAllocateBuffer(_recordState.mQueue, _recordState.bufferByteSize, &_recordState.mBuffers[i]);
+        [self hasError:status:__FILE__:__LINE__];
+
+        status = AudioQueueEnqueueBuffer(_recordState.mQueue, _recordState.mBuffers[i], 0, NULL);
+        [self hasError:status:__FILE__:__LINE__];
+    }
+
+    _recordState.mIsRunning = YES;
+    status = AudioQueueStart(_recordState.mQueue, NULL);
+    [self hasError:status:__FILE__:__LINE__];
+}
+
+
+/**
+ Stop Audio Input capture
+ */
+- (void)stop {
+    if (_recordState.mIsRunning) {
+        AudioQueueStop(_recordState.mQueue, true);
+        _recordState.mIsRunning = false;
+    }
+}
+
+
+/**
+ Pause Audio Input capture
+ */
+- (void)pause {
+    AudioQueuePause(_recordState.mQueue);
+}
+
+
+/**
+ Deallocate audio queue
+ */
+- (void)dealloc {
+    AudioQueueDispose(_recordState.mQueue, true);
+}
+
+
+/**
+ Forward sample data to the delegate (the Cordova plugin)
+ */
+- (void)didReceiveAudioData:(short*)samples dataLength:(int)length {
+    [self.delegate didReceiveAudioData:samples dataLength:length];
+}
+
+
+/**
+ Debug
+ */
+-(void)hasError:(int)statusCode:(char*)file:(int)line
+{
+    if (statusCode) {
+        NSLog(@"Error Code responded %d in file %s on line %d\n", statusCode, file, line);
+        exit(-1);
+    }
+}
+
+
+@end
diff --git a/src/ios/CDVAudioInputCapture.m b/src/ios/CDVAudioInputCapture.m
new file mode 100644
index 0000000..bb1dbbe
--- /dev/null
+++ b/src/ios/CDVAudioInputCapture.m
@@ -0,0 +1,142 @@
+//
+//  CDVAudioInputCapture.m
+//  CDVAudioInputCapture
+//
+//  Created by Edin Mujkanovic on 2016-02-06.
+//
+//
+
+#import <Cordova/CDVPlugin.h>
+#import "AudioReceiver.h"
+
+@interface CDVAudioInputCapture : CDVPlugin {
+}
+
+@property (strong, nonatomic) AudioReceiver* audioReceiver;
+@property (strong) NSString* callbackId;
+
+- (void)start:(CDVInvokedUrlCommand*)command;
+- (void)stop:(CDVInvokedUrlCommand*)command;
+- (void)didReceiveAudioData:(short*)data dataLength:(int)length;
+
+@end
+
+@implementation CDVAudioInputCapture
+
+- (void)pluginInitialize
+{
+    NSNotificationCenter* listener = [NSNotificationCenter defaultCenter];
+
+    [listener addObserver:self
+                 selector:@selector(didEnterBackground)
+                     name:UIApplicationDidEnterBackgroundNotification
+                   object:nil];
+
+    [listener addObserver:self
+                 selector:@selector(willEnterForeground)
+                     name:UIApplicationWillEnterForegroundNotification
+                   object:nil];
+}
+
+
+- (void)start:(CDVInvokedUrlCommand*)command
+{
+    self.callbackId = command.callbackId;
+
+    int sampleRate = [[command.arguments objectAtIndex:0] intValue];
+    int bufferSizeInBytes = [[command.arguments objectAtIndex:1] intValue];
+    short channels = [[command.arguments objectAtIndex:2] intValue];
+    NSString* format = [command.arguments objectAtIndex:3];
+
+    self.audioReceiver = [[AudioReceiver alloc] init:sampleRate bufferSize:bufferSizeInBytes noOfChannels:channels
+                                         audioFormat:format];
+
+    self.audioReceiver.delegate = self;
+
+    [self.audioReceiver start];
+}
+
+
+- (void)stop:(CDVInvokedUrlCommand*)command
+{
+    [self.audioReceiver stop];
+
+    if (self.callbackId) {
+        CDVPluginResult* result = [CDVPluginResult resultWithStatus:CDVCommandStatus_OK messageAsDouble:0.0f];
+        [result setKeepCallbackAsBool:NO];
+        [self.commandDelegate sendPluginResult:result callbackId:self.callbackId];
+    }
+
+    self.callbackId = nil;
+}
+
+
+- (void)didReceiveAudioData:(short*)data dataLength:(int)length
+{
+    @try {
+        NSMutableArray *mutableArray = [NSMutableArray arrayWithCapacity:length];
+
+        if (length == 0) {
+            if (self.callbackId) {
+                CDVPluginResult* result = [CDVPluginResult resultWithStatus:CDVCommandStatus_ERROR
+                                                            messageAsString:@"No data received from input device"];
+                [result setKeepCallbackAsBool:YES];
+                [self.commandDelegate sendPluginResult:result callbackId:self.callbackId];
+            }
+        }
+        else {
+            for (int i = 0; i < length; i++) {
+                NSNumber *number = [[NSNumber alloc] initWithShort:data[i]];
+                [mutableArray addObject:number];
+                //[number release];
+            }
+
+            // Serialize the samples as "[s0,s1,...]" to match what the JS layer parses
+            NSString *str = [mutableArray componentsJoinedByString:@","];
+            NSString *dataStr = [NSString stringWithFormat:@"[%@]", str];
+            NSDictionary* audioData = [NSDictionary dictionaryWithObject:[NSString stringWithString:dataStr] forKey:@"data"];
+
+            if (self.callbackId) {
+                CDVPluginResult* result = [CDVPluginResult resultWithStatus:CDVCommandStatus_OK messageAsDictionary:audioData];
+                [result setKeepCallbackAsBool:YES];
+                [self.commandDelegate sendPluginResult:result callbackId:self.callbackId];
+            }
+        }
+    }
+    @catch (NSException *exception) {
+        if (self.callbackId) {
+            CDVPluginResult* result = [CDVPluginResult resultWithStatus:CDVCommandStatus_ERROR
+                                                        messageAsString:@"Exception in didReceiveAudioData"];
+            [result setKeepCallbackAsBool:YES];
+            [self.commandDelegate sendPluginResult:result callbackId:self.callbackId];
+        }
+    }
+}
+
+
+- (void)dealloc
+{
+    [[NSNotificationCenter defaultCenter] removeObserver:self name:UIApplicationDidEnterBackgroundNotification object:nil];
+    [[NSNotificationCenter defaultCenter] removeObserver:self name:UIApplicationWillEnterForegroundNotification object:nil];
+
+    [self stop:nil];
+}
+
+
+- (void)onReset
+{
+    [self stop:nil];
+}
+
+
+- (void)didEnterBackground
+{
+    [self.audioReceiver pause];
+}
+
+
+- (void)willEnterForeground
+{
+    [self.audioReceiver start];
+}
+
+@end
diff --git a/www/audioInputCapture.js b/www/audioInputCapture.js
new file mode 100644
index 0000000..4632cc8
--- /dev/null
+++ b/www/audioInputCapture.js
@@ -0,0 +1,334 @@
+/*var cordova = require('cordova'),
+    exec = require('cordova/exec');*/
+
+var argscheck = require('cordova/argscheck'),
+    utils = require('cordova/utils'),
+    exec = require('cordova/exec'),
+    channel = require('cordova/channel');
+
+
+var audioinput = {
+    audioContext: null,
+    audioDataQueue: [],
+    capturing: false,
+    concatenateMaxChunks: 10,
+    timerGetNextAudio: 0
+};
+
+/**
+ * Start capture of audio input
+ *
+ * @param {Object} cfg
+ * keys:
+ *  sampleRate (44100),
+ *  bufferSize (4096),
+ *  channels (1 (mono) or 2 (stereo)),
+ *  format ('PCM_8BIT' or 'PCM_16BIT'),
+ *  normalize (true || false),
+ *  normalizationFactor (create float data by dividing the audio data with this factor; default: 32767.0)
+ *  streamToWebAudio (the plugin will handle all the conversion of raw data to audio)
+ *  audioContext (if no audioContext is given, one will be created)
+ *  concatenateMaxChunks (how many packets will be merged each time; low = low latency but can require more resources)
+ */
+audioinput.start = function (cfg) {
+    try {
+        if (!cfg) {
+            cfg = {};
+        }
+
+        audioinput.cfg = {};
+        audioinput.cfg.sampleRate = cfg.sampleRate || 44100;
+        audioinput.cfg.bufferSize = cfg.bufferSize || 4096;
+        audioinput.cfg.channels = cfg.channels || 1;
+        audioinput.cfg.format = cfg.format || 'PCM_16BIT';
+        // Note: "cfg.normalize || true" would always be true; use an explicit type check instead.
+        audioinput.cfg.normalize = typeof cfg.normalize === 'boolean' ? cfg.normalize : true;
+        audioinput.cfg.normalizationFactor = cfg.normalizationFactor || 32767.0;
+        audioinput.cfg.streamToWebAudio = cfg.streamToWebAudio || false;
+        audioinput.cfg.audioContext = cfg.audioContext || null;
+        audioinput.cfg.concatenateMaxChunks = cfg.concatenateMaxChunks || 10;
+
+        if (audioinput.cfg.channels < 1 || audioinput.cfg.channels > 2) {
+            throw "Invalid number of channels (" + audioinput.cfg.channels + "). Only mono (1) and stereo (2) are" +
+                " supported.";
+        }
+        else if (audioinput.cfg.format != "PCM_16BIT" && audioinput.cfg.format != "PCM_8BIT") {
+            throw "Invalid format (" + audioinput.cfg.format + "). Only 'PCM_8BIT' and 'PCM_16BIT' are" +
+                " supported.";
+        }
+
+        exec(audioinput.audioInputEvent, audioinput.error, "AudioInputCapture", "start", [audioinput.cfg.sampleRate, audioinput.cfg.bufferSize, audioinput.cfg.channels, audioinput.cfg.format]);
+
+        if (audioinput.cfg.streamToWebAudio) {
+            audioinput._initWebAudio(audioinput.cfg.audioContext);
+            audioinput.audioDataQueue = [];
+            audioinput._getNextToPlay();
+        }
+
+        audioinput.capturing = true;
+    }
+    catch (ex) {
+        throw "Failed to start audioinput due to: " + ex;
+    }
+};
+
+
+/**
+ * Stop capturing audio
+ */
+audioinput.stop = function () {
+    exec(null, audioinput.error, "AudioInputCapture", "stop", []);
+    if (audioinput.cfg && audioinput.cfg.streamToWebAudio) {
+        if (audioinput.timerGetNextAudio) {
+            clearTimeout(audioinput.timerGetNextAudio);
+        }
+        audioinput.audioDataQueue = null;
+    }
+
+    audioinput.capturing = false;
+};
+
+
+/**
+ * Connect the audio node
+ *
+ * @param audioNode
+ */
+audioinput.connect = function (audioNode) {
+    if (audioinput.micGainNode) {
+        audioinput.disconnect();
+        audioinput.micGainNode.connect(audioNode);
+    }
+};
+
+/**
+ * Disconnect the audio node
+ */
+audioinput.disconnect = function () {
+    if (audioinput.micGainNode) {
+        audioinput.micGainNode.disconnect();
+    }
+};
+
+/**
+ * Returns the internally created Web Audio Context (if any exists)
+ *
+ * @returns {*}
+ */
+audioinput.getAudioContext = function () {
+    return audioinput.audioContext;
+};
+
+/**
+ * Returns the configuration used by the last call to start()
+ *
+ * @returns {*}
+ */
+audioinput.getCfg = function () {
+    return audioinput.cfg;
+};
+
+/**
+ * Returns true while capture is running
+ *
+ * @returns {boolean}
+ */
+audioinput.isCapturing = function () {
+    return audioinput.capturing;
+};
+
+/**
+ * Callback for audio input
+ *
+ * @param {Object} audioInputData keys: data (PCM)
+ */
+audioinput.audioInputEvent = function (audioInputData) {
+    try {
+        if (audioInputData && audioInputData.data && audioInputData.data.length > 0) {
+            // The native layer sends the samples as a bracketed string, e.g. "[1,2,3]";
+            // strip both brackets before splitting (substr(1, length - 1) left the trailing ']').
+            var audioData = audioInputData.data.substring(1, audioInputData.data.length - 1).split(',');
+            audioData = audioinput._normalizeAudio(audioData);
+
+            if (audioinput.cfg.streamToWebAudio && audioinput.capturing) {
+                audioinput._enqueueAudioData(audioData);
+            }
+            else {
+                cordova.fireWindowEvent("audioinput", { data: audioData });
+            }
+        }
+        else if (audioInputData && audioInputData.error) {
+            audioinput.error(audioInputData.error);
+        }
+        else {
+            //audioinput.error("Empty"); // Happens when capture is stopped
+        }
+    }
+    catch (ex) {
+        audioinput.error("audioinput._audioInputEvent ex: " + ex);
+    }
+};
+
+/**
+ * Error callback for AudioInputCapture start
+ * @private
+ */
+audioinput.error = function (e) {
+    cordova.fireWindowEvent("audioinputerror", e);
+};
+
+/**
+ * Normalize audio input
+ *
+ * @param {Object} pcmData
+ * @private
+ */
+audioinput._normalizeAudio = function (pcmData) {
+
+    if (audioinput.cfg.normalize) {
+        for (var i = 0; i < pcmData.length; i++) {
+            pcmData[i] = parseFloat(pcmData[i]) / audioinput.cfg.normalizationFactor;
+        }
+
+        // If the last value is NaN (e.g. from an empty or malformed chunk), remove it.
+        if (isNaN(pcmData[pcmData.length - 1])) {
+            pcmData.pop();
+        }
+    }
+
+    return pcmData;
+};
+
+
+/**
+ * Consumes data from the audioinput queue
+ * @private
+ */
+audioinput._getNextToPlay = function () {
+    try {
+        var duration = 100;
+
+        if (audioinput.audioDataQueue.length > 0) {
+            var concatenatedData = [];
+            for (var i = 0; i < audioinput.cfg.concatenateMaxChunks; i++) {
+                if (audioinput.audioDataQueue.length === 0) {
+                    break;
+                }
+                concatenatedData = concatenatedData.concat(audioinput._dequeueAudioData());
+            }
+
+            duration = audioinput._playAudio(concatenatedData) * 1000;
+        }
+
+        if (audioinput.capturing) {
+            audioinput.timerGetNextAudio = setTimeout(audioinput._getNextToPlay, duration);
+        }
+    }
+    catch (ex) {
+        audioinput.error("audioinput._getNextToPlay ex: " + ex);
+    }
+};
+
+
+/**
+ * Play audio using the Web Audio API
+ * @param data
+ * @returns {Number}
+ * @private
+ */
+audioinput._playAudio = function (data) {
+    try {
+        var audioBuffer = audioinput.audioContext.createBuffer(audioinput.cfg.channels, (data.length / audioinput.cfg.channels),
+            audioinput.cfg.sampleRate);
+
+        if (audioinput.cfg.channels > 1) {
+            // For stereo, the captured data is assumed to be interleaved; de-interleave per channel.
+            for (var i = 0; i < audioinput.cfg.channels; i++) {
+                var chdata = [],
+                    index = 0;
+
+                while (index < data.length) {
+                    chdata.push(data[index + i]);
+                    index += parseInt(audioinput.cfg.channels);
+                }
+
+                audioBuffer.getChannelData(i).set(new Float32Array(chdata));
+            }
+        }
+        else {
+            audioBuffer.getChannelData(0).set(data);
+        }
+
+        var source = audioinput.audioContext.createBufferSource();
+        source.buffer = audioBuffer;
+        source.connect(audioinput.micGainNode);
+        source.start(0);
+
+        return audioBuffer.duration;
+    }
+    catch (ex) {
+        audioinput.error("audioinput._playAudio ex: " + ex);
+    }
+};
+
+
+/**
+ * Creates the Web Audio Context and audio nodes for output.
+ * @private
+ */
+audioinput._initWebAudio = function (audioCtxFromCfg) {
+    if (!audioinput.audioContext) { // Only if not already set
+        if (!audioCtxFromCfg) { // Create a new context if not given in cfg
+            window.AudioContext = window.AudioContext || window.webkitAudioContext;
+            audioinput.audioContext = new AudioContext();
+        }
+        else {
+            audioinput.audioContext = audioCtxFromCfg;
+        }
+    }
+
+    // Create a gain node for volume control
+    if (!audioinput.micGainNode) {
+        audioinput.micGainNode = audioinput.audioContext.createGain();
+    }
+};
+
+/**
+ * Puts audio data at the end of the queue
+ *
+ * @returns {*}
+ * @private
+ */
+audioinput._enqueueAudioData = function (data) {
+    audioinput.audioDataQueue.push(data);
+};
+
+/**
+ * Gets and removes the oldest audio data from the queue
+ *
+ * @returns {*}
+ * @private
+ */
+audioinput._dequeueAudioData = function () {
+    return audioinput.audioDataQueue.shift();
+};
+
+/*
+audioinput._handlers = function () {
+    return audioinput.channels.audioInput.numHandlers;
+};
+
+audioinput.onHasSubscribersChange = function () {
+    if (audioinput._handlers() === 0) {
+        exec(null, null, "AudioInputCapture", "stop", []);
+    }
+};
+
+audioinput.channels = {
+    audioInput: cordova.addWindowEventHandler("audioinput")
+};
+
+for (var key in audioinput.channels) {
+    if (audioinput.channels.hasOwnProperty(key)) {
+        audioinput.channels[key].onHasSubscribersChange = audioinput.onHasSubscribersChange;
+    }
+}*/
+
+module.exports = audioinput;
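+
+/* Minimal usage sketch (illustrative only; it mirrors the README examples and is
+   not part of the module's executed code):
+
+   window.addEventListener('audioinput', function (evt) {
+       // evt.data is an array of normalized samples
+   }, false);
+
+   audioinput.start({ sampleRate: 44100, bufferSize: 8192, channels: 1, format: 'PCM_16BIT' });
+   // ... later:
+   audioinput.stop();
+*/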