Skip to content

Commit

Permalink
Add support for audio transcriptions and translations (Whisper)
Browse files Browse the repository at this point in the history
  • Loading branch information
OkGoDoIt committed Dec 14, 2023
1 parent baad602 commit b8feb14
Show file tree
Hide file tree
Showing 15 changed files with 613 additions and 15 deletions.
56 changes: 56 additions & 0 deletions OpenAI_API/Audio/AudioRequest.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
using System;
using System.Collections.Generic;
using System.Text;
using Newtonsoft.Json;
using static OpenAI_API.Audio.TextToSpeechRequest;

namespace OpenAI_API.Audio
{
	/// <summary>
	/// Parameters for a request to the OpenAI audio transcription or translation (Whisper) endpoints.
	/// All properties other than <see cref="Model"/> are optional and are omitted from the serialized
	/// JSON when left at their defaults.
	/// </summary>
	public class AudioRequest
	{
		/// <summary>
		/// The model to use for this request. Currently only <see cref="OpenAI_API.Models.Model.Whisper1"/> is supported.
		/// </summary>
		[JsonProperty("model")]
		public string Model { get; set; } = OpenAI_API.Models.Model.DefaultTranscriptionModel;

		/// <summary>
		/// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language for transcriptions, or English for translations.
		/// </summary>
		[JsonProperty("prompt", DefaultValueHandling = DefaultValueHandling.Ignore)]
		public string Prompt { get; set; } = null;

		/// <summary>
		/// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
		/// </summary>
		[JsonProperty("language", DefaultValueHandling = DefaultValueHandling.Ignore)]
		public string Language { get; set; } = null;

		/// <summary>
		/// The format of the transcript output, should be one of the options in <see cref="AudioRequest.ResponseFormats"/>. See <seealso href="https://platform.openai.com/docs/api-reference/audio/createTranscription#audio-createtranscription-response_format"/>
		/// </summary>
		[JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
		public string ResponseFormat { get; set; } = null;

		/// <summary>
		/// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
		/// </summary>
		// NOTE: because 0 is the CLR default for double, a temperature of 0 is never serialized
		// (DefaultValueHandling.Ignore), so the API falls back to its own default in that case.
		[JsonProperty("temperature", DefaultValueHandling = DefaultValueHandling.Ignore)]
		public double Temperature { get; set; } = 0;


		/// <summary>
		/// The format of the transcript output. See <seealso href="https://platform.openai.com/docs/api-reference/audio/createTranscription#audio-createtranscription-response_format"/>
		/// </summary>
		public static class ResponseFormats
		{
#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
			public const string JSON = "json";
			public const string Text = "text";
			public const string SRT = "srt";
			public const string VerboseJson = "verbose_json";
			public const string VTT = "vtt";
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
		}
	}
}
32 changes: 32 additions & 0 deletions OpenAI_API/Audio/AudioResult.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace OpenAI_API.Audio
{
	/// <summary>
	/// Represents a verbose_json output from the OpenAI Transcribe or Translate endpoints.
	/// </summary>
	// Property names are intentionally lowercase to match the JSON keys returned by the API,
	// since no [JsonProperty] attributes are applied here.
	public class AudioResultVerbose : ApiResultBase
	{
		/// <summary>Duration of the input audio, in seconds.</summary>
		public double duration { get; set; }
		/// <summary>The language of the input audio, as detected or supplied in the request.</summary>
		public string language { get; set; }
		/// <summary>The individual timestamped segments making up the full result.</summary>
		public List<Segment> segments { get; set; }
		/// <summary>The task that was performed (e.g. "transcribe" or "translate").</summary>
		public string task { get; set; }
		/// <summary>The full transcribed or translated text.</summary>
		public string text { get; set; }

		/// <summary>
		/// A single timestamped segment of the transcription or translation.
		/// </summary>
		public class Segment
		{
			/// <summary>Average log probability of the tokens in this segment.</summary>
			public double avg_logprob { get; set; }
			/// <summary>Compression ratio of the segment text (used by the API as a quality signal).</summary>
			public double compression_ratio { get; set; }
			/// <summary>End time of the segment, in seconds from the start of the audio.</summary>
			public double end { get; set; }
			/// <summary>Sequential identifier of this segment within the result.</summary>
			public int id { get; set; }
			/// <summary>Probability that this segment contains no speech.</summary>
			public double no_speech_prob { get; set; }
			/// <summary>Seek offset of the segment, as reported by the API.</summary>
			public int seek { get; set; }
			/// <summary>Start time of the segment, in seconds from the start of the audio.</summary>
			public double start { get; set; }
			/// <summary>Sampling temperature that was used when generating this segment.</summary>
			public double temperature { get; set; }
			/// <summary>The transcribed text of this segment.</summary>
			public string text { get; set; }
			/// <summary>Token ids of the segment text.</summary>
			public List<int> tokens { get; set; }
		}
	}
}
4 changes: 2 additions & 2 deletions OpenAI_API/Audio/ITextToSpeechEndpoint.cs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ public interface ITextToSpeechEndpoint
/// <param name="responseFormat">The default response format is "mp3", but other formats are available in <see cref="TextToSpeechRequest.ResponseFormats"/>. See <seealso href="https://platform.openai.com/docs/guides/text-to-speech/supported-output-formats"/></param>
/// <param name="model">TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, <see cref="Model.TTS_Speed"/> is optimized for real time text to speech use cases and <see cref="Model.TTS_HD"/> is optimized for quality.</param>
/// <returns>A stream of the audio file in the requested format.</returns>
Task<Stream> GetSpeechAsStreamAsync(string input, string voice = null, decimal? speed = null, string responseFormat = null, Model model = null);
Task<Stream> GetSpeechAsStreamAsync(string input, string voice = null, double? speed = null, string responseFormat = null, Model model = null);

/// <summary>
/// Calls the API to create speech from text, and saves the audio file to disk.
Expand All @@ -50,7 +50,7 @@ public interface ITextToSpeechEndpoint
/// <param name="responseFormat">The default response format is "mp3", but other formats are available in <see cref="TextToSpeechRequest.ResponseFormats"/>. See <seealso href="https://platform.openai.com/docs/guides/text-to-speech/supported-output-formats"/></param>
/// <param name="model">TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, <see cref="Model.TTS_Speed"/> is optimized for real time text to speech use cases and <see cref="Model.TTS_HD"/> is optimized for quality.</param>
/// <returns>A stream of the audio file in the requested format.</returns>
Task<FileInfo> SaveSpeechToFileAsync(string input, string localPath, string voice = null, decimal? speed = null, string responseFormat = null, Model model = null);
Task<FileInfo> SaveSpeechToFileAsync(string input, string localPath, string voice = null, double? speed = null, string responseFormat = null, Model model = null);


}
Expand Down
81 changes: 81 additions & 0 deletions OpenAI_API/Audio/ITranscriptionEndpoint.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
using System.IO;
using System.Threading.Tasks;

namespace OpenAI_API.Audio
{
	/// <summary>
	/// Transcribe audio into text, with optional translation into English.
	/// </summary>
	public interface ITranscriptionEndpoint
	{
		/// <summary>
		/// This allows you to set default parameters for every request, for example to set a default language. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
		/// </summary>
		AudioRequest DefaultRequestArgs { get; set; }

		/// <summary>
		/// Gets the transcription of the audio stream, in the specified format
		/// </summary>
		/// <param name="audioStream">The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="filename">The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.</param>
		/// <param name="responseFormat">The format of the response. Suggested value are <see cref="AudioRequest.ResponseFormats.SRT"/> or <see cref="AudioRequest.ResponseFormats.VTT"/>. For text and Json formats, try <see cref="GetTextAsync(Stream, string, string, string, double?)"/> or <see cref="GetWithDetailsAsync(Stream, string, string, string, double?)"/> instead.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>The transcription, formatted according to <paramref name="responseFormat"/></returns>
		Task<string> GetAsFormatAsync(Stream audioStream, string filename, string responseFormat, string language = null, string prompt = null, double? temperature = null);

		/// <summary>
		/// Gets the transcription of the audio file, in the specified format
		/// </summary>
		/// <param name="audioFilePath">The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="responseFormat">The format of the response. Suggested value are <see cref="AudioRequest.ResponseFormats.SRT"/> or <see cref="AudioRequest.ResponseFormats.VTT"/>. For text and Json formats, try <see cref="GetTextAsync(string, string, string, double?)"/> or <see cref="GetWithDetailsAsync(string, string, string, double?)"/> instead.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>The transcription, formatted according to <paramref name="responseFormat"/></returns>
		Task<string> GetAsFormatAsync(string audioFilePath, string responseFormat, string language = null, string prompt = null, double? temperature = null);

		/// <summary>
		/// Gets the transcription of the audio stream, with full metadata
		/// </summary>
		/// <param name="audioStream">The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="filename">The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>An <see cref="AudioResultVerbose"/> containing the transcribed text along with full metadata such as language, duration, and timestamped segments</returns>
		Task<AudioResultVerbose> GetWithDetailsAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null);

		/// <summary>
		/// Gets the transcription of the audio file, with full metadata
		/// </summary>
		/// <param name="audioFilePath">The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>An <see cref="AudioResultVerbose"/> containing the transcribed text along with full metadata such as language, duration, and timestamped segments</returns>
		Task<AudioResultVerbose> GetWithDetailsAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null);

		/// <summary>
		/// Gets the transcription of the audio stream as a text string
		/// </summary>
		/// <param name="audioStream">The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="filename">The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>A string of the transcribed text</returns>
		Task<string> GetTextAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null);

		/// <summary>
		/// Gets the transcription of the audio file as a text string
		/// </summary>
		/// <param name="audioFilePath">The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>A string of the transcribed text</returns>
		Task<string> GetTextAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null);
	}
}
6 changes: 3 additions & 3 deletions OpenAI_API/Audio/TextToSpeechEndpoint.cs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ public class TextToSpeechEndpoint : EndpointBase, ITextToSpeechEndpoint
public TextToSpeechRequest DefaultTTSRequestArgs { get; set; } = new TextToSpeechRequest();

/// <summary>
/// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="OpenAIAPI"/> as <see cref="OpenAIAPI.Completions"/>.
/// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="OpenAIAPI"/> as <see cref="OpenAIAPI.TextToSpeech"/>.
/// </summary>
/// <param name="api">Pass in the instance of the api</param>
internal TextToSpeechEndpoint(OpenAIAPI api) : base(api) { }
Expand All @@ -48,7 +48,7 @@ public async Task<Stream> GetSpeechAsStreamAsync(TextToSpeechRequest request)
/// <param name="responseFormat">The default response format is "mp3", but other formats are available in <see cref="TextToSpeechRequest.ResponseFormats"/>. See <seealso href="https://platform.openai.com/docs/guides/text-to-speech/supported-output-formats"/></param>
/// <param name="model">TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, <see cref="Model.TTS_Speed"/> is optimized for real time text to speech use cases and <see cref="Model.TTS_HD"/> is optimized for quality.</param>
/// <returns>A stream of the audio file in the requested format.</returns>
public async Task<Stream> GetSpeechAsStreamAsync(string input, string voice = null, decimal? speed = null, string responseFormat = null, Model model = null)
public async Task<Stream> GetSpeechAsStreamAsync(string input, string voice = null, double? speed = null, string responseFormat = null, Model model = null)
{
var request = new TextToSpeechRequest()
{
Expand Down Expand Up @@ -87,7 +87,7 @@ public async Task<FileInfo> SaveSpeechToFileAsync(TextToSpeechRequest request, s
/// <param name="responseFormat">The default response format is "mp3", but other formats are available in <see cref="TextToSpeechRequest.ResponseFormats"/>. See <seealso href="https://platform.openai.com/docs/guides/text-to-speech/supported-output-formats"/></param>
/// <param name="model">TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, <see cref="Model.TTS_Speed"/> is optimized for real time text to speech use cases and <see cref="Model.TTS_HD"/> is optimized for quality.</param>
/// <returns>A stream of the audio file in the requested format.</returns>
public async Task<FileInfo> SaveSpeechToFileAsync(string input, string localPath, string voice = null, decimal? speed = null, string responseFormat = null, Model model = null)
public async Task<FileInfo> SaveSpeechToFileAsync(string input, string localPath, string voice = null, double? speed = null, string responseFormat = null, Model model = null)
{
var request = new TextToSpeechRequest()
{
Expand Down
2 changes: 1 addition & 1 deletion OpenAI_API/Audio/TextToSpeechRequest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ public class TextToSpeechRequest
/// The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
/// </summary>
[JsonProperty("speed", DefaultValueHandling = DefaultValueHandling.Ignore)]
public decimal? Speed { get; set; } = null;
public double? Speed { get; set; } = null;

/// <summary>
/// Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide. See <seealso href="https://platform.openai.com/docs/guides/text-to-speech/voice-options"/>.
Expand Down
Loading

0 comments on commit b8feb14

Please sign in to comment.