Skip to content

Commit

Permalink
feat: chat complete image input
Browse files Browse the repository at this point in the history
  • Loading branch information
redevrx committed Jan 24, 2024
1 parent c8ccc12 commit 3ccbb86
Show file tree
Hide file tree
Showing 9 changed files with 182 additions and 127 deletions.
88 changes: 60 additions & 28 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ FutureBuilder<CTResponse?>(
void chatComplete() async {
final request = ChatCompleteText(messages: [
Map.of({"role": "user", "content": 'Hello!'})
], maxToken: 200, model: GptTurbo0301ChatModel());
], maxToken: 200, model: Gpt41106PreviewChatModel());
final response = await openAI.onChatCompletion(request: request);
for (var element in response!.choices) {
Expand All @@ -216,37 +216,69 @@ FutureBuilder<CTResponse?>(
- Chat Complete Function Calling

```dart
/// Demonstrates chat completion with tool (function) calling.
///
/// Requires a model that supports the `tools` API,
/// e.g. gpt-4-1106-preview ([Gpt41106PreviewChatModel]).
void gptFunctionCalling() async {
  final request = ChatCompleteText(
    messages: [
      // `messages` takes raw JSON maps, so convert with `toJson()`.
      Messages(
              role: Role.user,
              content: "What is the weather like in Boston?",
              name: "get_current_weather")
          .toJson(),
    ],
    maxToken: 200,
    model: Gpt41106PreviewChatModel(),
    // JSON Schema description of the callable tool.
    tools: [
      {
        "type": "function",
        "function": {
          "name": "get_current_weather",
          "description": "Get the current weather in a given location",
          "parameters": {
            "type": "object",
            "properties": {
              "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA"
              },
              "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"]
              }
            },
            "required": ["location"]
          }
        }
      }
    ],
    // Let the model decide whether to call the tool.
    toolChoice: 'auto',
  );
  ChatCTResponse? response = await openAI.onChatCompletion(request: request);
}
```

- Chat Complete Image Input

```dart
/// Demonstrates chat completion with image input (GPT-4 Vision).
///
/// The user message carries a list of content parts:
/// one text part plus one `image_url` part.
void imageInput() async {
  final request = ChatCompleteText(
    messages: [
      {
        "role": "user",
        "content": [
          {"type": "text", "text": "What’s in this image?"},
          {
            "type": "image_url",
            "image_url": {"url": "image-url"}
          }
        ]
      }
    ],
    maxToken: 200,
    model: Gpt4VisionPreviewChatModel(),
  );
  ChatCTResponse? response = await openAI.onChatCompletion(request: request);
  debugPrint("$response");
}
```

Expand Down
84 changes: 56 additions & 28 deletions example/lib/main.dart
Original file line number Diff line number Diff line change
Expand Up @@ -45,42 +45,70 @@ class _TranslateScreenState extends State<TranslateScreen> {
///parameter name is required
/// Sends a chat completion request that exposes a weather tool
/// and lets the model decide whether to call it.
void gptFunctionCalling() async {
  final request = ChatCompleteText(
    messages: [
      // `messages` takes raw JSON maps, so convert with `toJson()`.
      Messages(
              role: Role.user,
              content: "What is the weather like in Boston?",
              name: "get_current_weather")
          .toJson(),
    ],
    maxToken: 200,
    model: Gpt41106PreviewChatModel(),
    // JSON Schema description of the callable tool.
    tools: [
      {
        "type": "function",
        "function": {
          "name": "get_current_weather",
          "description": "Get the current weather in a given location",
          "parameters": {
            "type": "object",
            "properties": {
              "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA"
              },
              "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"]
              }
            },
            "required": ["location"]
          }
        }
      }
    ],
    // Let the model decide whether to call the tool.
    toolChoice: 'auto',
  );

  ChatCTResponse? response = await openAI.onChatCompletion(request: request);
  debugPrint("$response");
}

/// Sends a vision (image input) chat completion request and logs the result.
void imageInput() async {
  // Build the multimodal user message first: a text part plus an
  // image_url part, as raw JSON maps.
  final content = [
    {"type": "text", "text": "What’s in this image?"},
    {
      "type": "image_url",
      "image_url": {"url": "image-url"}
    }
  ];

  final request = ChatCompleteText(
    messages: [
      {"role": "user", "content": content}
    ],
    maxToken: 200,
    model: Gpt4VisionPreviewChatModel(),
  );

  ChatCTResponse? response = await openAI.onChatCompletion(request: request);
  debugPrint("$response");
}

void gpt4() async {
final request = ChatCompleteText(messages: [
Messages(role: Role.assistant, content: 'Hello!'),
Messages(role: Role.assistant, content: 'Hello!').toJson(),
], maxToken: 200, model: Gpt4ChatModel());

await openAI.onChatCompletion(request: request);
Expand Down
1 change: 1 addition & 0 deletions example_app/openai_app/lib/bloc/openai/openai_bloc.dart
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ class OpenAIBloc extends Cubit<OpenAIState> {
model: _getVersion() ? Gpt4ChatModel() : GptTurboChatModel(),
messages: [
Messages(role: Role.user, content: getTextInput().value.text)
.toJson(),
],
maxToken: 400);

Expand Down
12 changes: 12 additions & 0 deletions lib/src/assistants.dart
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,16 @@ import 'client/client.dart';
/// Entry point for the Assistants API endpoints.
class Assistants {
  // HTTP client shared with the rest of the SDK.
  final OpenAIClient _client;
  Assistants(this._client);

  /// Creates an assistant.
  ///
  /// NOTE(review): this looks like a work-in-progress stub — the URL is the
  /// literal placeholder 'url' and both callbacks are no-ops. TODO confirm
  /// the real endpoint path and wire up success/cancel handling. Also,
  /// a create operation would normally be a POST, not a GET — verify
  /// against [OpenAIClient].
  void create() {
    _client.get(
      'url',
      onSuccess: (p0) {
        return;
      },
      onCancel: (cancelData) {
        return;
      },
    );
  }
}
4 changes: 4 additions & 0 deletions lib/src/model/chat_complete/enum/chat_model.dart
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,10 @@ class Gpt4VisionPreviewChatModel extends ChatModel {
Gpt4VisionPreviewChatModel() : super(model: kGpt4VisionPreview);
}

/// Chat model wrapper for the id held in [kGpt41106Preview]
/// (the gpt-4-1106-preview model).
class Gpt41106PreviewChatModel extends ChatModel {
  Gpt41106PreviewChatModel() : super(model: kGpt41106Preview);
}

// enum ChatModel {
// gptTurbo,
//
Expand Down
63 changes: 20 additions & 43 deletions lib/src/model/chat_complete/request/chat_complete_text.dart
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import 'package:chat_gpt_sdk/src/model/chat_complete/enum/chat_model.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/enum/function_call.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/request/function_data.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/request/messages.dart';
import 'package:chat_gpt_sdk/src/model/chat_complete/request/response_format.dart';

class ChatCompleteText {
Expand All @@ -23,7 +22,7 @@ class ChatCompleteText {

///The messages to generate chat completions for,
/// in the chat format. [messages]
final List<Messages> messages;
final List<Map<String, dynamic>> messages;

///A list of functions the model may generate JSON inputs for.
///[functions]
Expand Down Expand Up @@ -197,47 +196,25 @@ class ChatCompleteText {

Map<String, dynamic> toJson() {
Map<String, dynamic> json;
json = model is Gpt40631ChatModel || model is GptTurbo0631Model
? Map.of({
"model": model.model,
"messages": messages.map((e) => e.toJsonFunctionStruct()).toList(),
// "functions": functions?.map((e) => e.toJson()).toList(),
// "function_call": functionCall?.name,
"temperature": temperature,
"top_p": topP,
"n": n,
"stream": stream,
"stop": stop,
"max_tokens": maxToken,
"presence_penalty": presencePenalty,
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
"logit_bias": logitBias,
"logprobs": logprobs,
"top_logprobs": topLogprobs,
"seed": seed,
"tool_choice": toolChoice,
})
: Map.of({
"model": model.model,
"messages": messages.map((e) => e.toJson()).toList(),
"temperature": temperature,
"top_p": topP,
"n": n,
"stream": stream,
"stop": stop,
"max_tokens": maxToken,
"presence_penalty": presencePenalty,
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
"logit_bias": logitBias,
"logprobs": logprobs,
"top_logprobs": topLogprobs,
"seed": seed,
"tool_choice": toolChoice,
})
json = Map.of({
"model": model.model,
"messages": messages,
"temperature": temperature,
"top_p": topP,
"n": n,
"stream": stream,
"stop": stop,
"max_tokens": maxToken,
"presence_penalty": presencePenalty,
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
"logit_bias": logitBias,
"logprobs": logprobs,
"top_logprobs": topLogprobs,
"seed": seed,
"tool_choice": toolChoice,
})
..removeWhere((key, value) => value == null);

return json;
Expand Down
1 change: 1 addition & 0 deletions lib/src/model/chat_complete/request/function_data.dart
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
@Deprecated('')
class FunctionData {
///The name of the function to be called.
/// Must be a-z, A-Z, 0-9, or contain underscores
Expand Down
30 changes: 15 additions & 15 deletions test/model/chat_complete/request/chat_complete_text_test.dart
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ void main() {
role: Role.user,
content: 'Hello',
name: 'function_name',
),
).toJson(),
];

final chatCompleteText = ChatCompleteText(
Expand Down Expand Up @@ -57,21 +57,21 @@ void main() {
role: Role.user,
content: 'Hello',
name: 'function_name',
),
).toJson(),
Messages(
role: Role.assistant,
content: 'Hi, how can I assist you today?',
name: 'function_name',
),
],
functionCall: FunctionCall.auto,
functions: [
FunctionData(
name: 'function_name',
description: '',
parameters: Map.of({}),
),
).toJson(),
],
// functionCall: FunctionCall.auto,
// functions: [
// FunctionData(
// name: 'function_name',
// description: '',
// parameters: Map.of({}),
// ),
// ],
);

final expectedJson = {
Expand All @@ -88,10 +88,10 @@ void main() {
'name': 'function_name',
},
],
'functions': [
{'name': 'function_name', 'description': '', 'parameters': {}},
],
'function_call': 'auto',
// 'functions': [
// {'name': 'function_name', 'description': '', 'parameters': {}},
// ],
// 'function_call': 'auto',
'temperature': 0.3,
'top_p': 1.0,
'n': 1,
Expand Down
Loading

0 comments on commit 3ccbb86

Please sign in to comment.