Skip to content

Commit

Permalink
Merge pull request #86 from huanfeng/main
Browse files Browse the repository at this point in the history
Add response_format support and gpt-3.5-turbo-1106 definition
  • Loading branch information
redevrx authored Dec 28, 2023
2 parents 3474c4d + 3bf2dc0 commit c81f7ec
Show file tree
Hide file tree
Showing 5 changed files with 29 additions and 0 deletions.
1 change: 1 addition & 0 deletions lib/chat_gpt_sdk.dart
Original file line number Diff line number Diff line change
Expand Up @@ -44,3 +44,4 @@ export 'src/model/chat_complete/enum/role.dart';
export 'src/model/chat_complete/enum/function_call.dart';
export 'src/model/chat_complete/request/messages.dart';
export 'src/model/chat_complete/request/function_data.dart';
export 'src/model/chat_complete/request/response_format.dart';
4 changes: 4 additions & 0 deletions lib/src/model/chat_complete/enum/chat_model.dart
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,10 @@ class GptTurbo0631Model extends ChatModel {
GptTurbo0631Model() : super(model: kChatGptTurbo0613);
}

class GptTurbo1106Model extends ChatModel {
GptTurbo1106Model() : super(model: kChatGptTurbo1106);
}

class GptTurbo16k0631Model extends ChatModel {
GptTurbo16k0631Model() : super(model: kChatGptTurbo16k0613);
}
Expand Down
9 changes: 9 additions & 0 deletions lib/src/model/chat_complete/request/chat_complete_text.dart
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import 'package:chat_gpt_sdk/src/model/chat_complete/enum/chat_model.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/enum/function_call.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/request/function_data.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/request/messages.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/request/response_format.dart';

class ChatCompleteText {
///ID of the model to use. Currently, only gpt-3.5-turbo and
Expand Down Expand Up @@ -38,6 +39,11 @@ class ChatCompleteText {
/// [functionCall]
final FunctionCall? functionCall;

///Defines the format of the model's response output. Currently, only supports
/// "json_object", and it is available only for certain models.
/// [responseFormat]
final ResponseFormat? responseFormat;

///What sampling temperature to use, between 0 and
///2. Higher values like 0.8 will make the output more random,
///while lower values like 0.2 will make it more focused and deterministic.
Expand Down Expand Up @@ -111,6 +117,7 @@ class ChatCompleteText {
this.user = "",
this.functions,
this.functionCall,
this.responseFormat,
});

Map<String, dynamic> toJson() {
Expand All @@ -130,6 +137,7 @@ class ChatCompleteText {
"presence_penalty": presencePenalty,
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
})
: Map.of({
"model": model.model,
Expand All @@ -143,6 +151,7 @@ class ChatCompleteText {
"presence_penalty": presencePenalty,
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
})
..removeWhere((key, value) => value == null);

Expand Down
14 changes: 14 additions & 0 deletions lib/src/model/chat_complete/request/response_format.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
class ResponseFormat {
final String type;

ResponseFormat({required this.type});

Map<String, dynamic> toJson() {
final data = <String, dynamic>{};
data['type'] = type;

return data;
}

static final jsonObject = ResponseFormat(type: "json_object");
}
1 change: 1 addition & 0 deletions lib/src/utils/constants.dart
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ const kChatGpt40314 = 'gpt-4-0314';
const kChatGpt432k = 'gpt-4-32k';
const kChatGpt432k0314 = 'gpt-4-32k-0314';
const kChatGptTurbo0613 = 'gpt-3.5-turbo-0613';
const kChatGptTurbo1106 = 'gpt-3.5-turbo-1106';
const kChatGptTurbo16k0613 = 'gpt-3.5-turbo-16k-0613';
const kChatGpt40631 = 'gpt-4-0613';

Expand Down

0 comments on commit c81f7ec

Please sign in to comment.