| line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
|
1
|
|
|
|
|
|
|
package OpenAIAsync::Client; |
|
2
|
|
|
|
|
|
|
|
|
3
|
2
|
|
|
2
|
|
575599
|
use v5.36.0; |
|
|
2
|
|
|
|
|
10
|
|
|
4
|
2
|
|
|
2
|
|
1574
|
use Object::Pad; |
|
|
2
|
|
|
|
|
44052
|
|
|
|
2
|
|
|
|
|
38
|
|
|
5
|
2
|
|
|
2
|
|
2011
|
use IO::Async::SSL; # We're not directly using it but I want to enforce that we pull it in when detecting dependencies, since openai itself is always https |
|
|
2
|
|
|
|
|
619387
|
|
|
|
2
|
|
|
|
|
131
|
|
|
6
|
2
|
|
|
2
|
|
1376
|
use Future::AsyncAwait; |
|
|
2
|
|
|
|
|
4195
|
|
|
|
2
|
|
|
|
|
14
|
|
|
7
|
2
|
|
|
2
|
|
1468
|
use IO::Async; |
|
|
2
|
|
|
|
|
540
|
|
|
|
2
|
|
|
|
|
99
|
|
|
8
|
|
|
|
|
|
|
|
|
9
|
2
|
|
|
2
|
|
1318
|
use OpenAIAsync::Types::Results; |
|
|
2
|
|
|
|
|
12
|
|
|
|
2
|
|
|
|
|
130
|
|
|
10
|
2
|
|
|
2
|
|
1669
|
use OpenAIAsync::Types::Requests; |
|
|
2
|
|
|
|
|
14
|
|
|
|
2
|
|
|
|
|
772
|
|
|
11
|
|
|
|
|
|
|
|
|
12
|
|
|
|
|
|
|
our $VERSION = '0.01'; # TRIAL |
|
13
|
|
|
|
|
|
|
|
|
14
|
|
|
|
|
|
|
# ABSTRACT: Async client for OpenAI style REST API for various AI systems (LLMs, Images, Video, etc.) |
|
15
|
|
|
|
|
|
|
|
|
16
|
|
|
|
|
|
|
=pod |
|
17
|
|
|
|
|
|
|
|
|
18
|
|
|
|
|
|
|
=head1 NAME |
|
19
|
|
|
|
|
|
|
|
|
20
|
|
|
|
|
|
|
OpenAIAsync::Client - IO::Async based client for OpenAI compatible APIs |
|
21
|
|
|
|
|
|
|
|
|
22
|
|
|
|
|
|
|
=head1 SYNOPSIS |
|
23
|
|
|
|
|
|
|
|
|
24
|
|
|
|
|
|
|
use IO::Async::Loop; |
|
25
|
|
|
|
|
|
|
use OpenAIAsync::Client; |
|
26
|
|
|
|
|
|
|
|
|
27
|
|
|
|
|
|
|
my $loop = IO::Async::Loop->new(); |
|
28
|
|
|
|
|
|
|
|
|
29
|
|
|
|
|
|
|
my $client = OpenAIAsync::Client->new(); |
|
30
|
|
|
|
|
|
|
|
|
31
|
|
|
|
|
|
|
$loop->add($client); |
|
32
|
|
|
|
|
|
|
|
|
33
|
|
|
|
|
|
|
my $output = await $client->chat({ |
|
34
|
|
|
|
|
|
|
model => "gpt-3.5-turbo", |
|
35
|
|
|
|
|
|
|
messages => [ |
|
36
|
|
|
|
|
|
|
{ |
|
37
|
|
|
|
|
|
|
role => "system", |
|
38
|
|
|
|
|
|
|
content => "You are a helpful assistant that tells fanciful stories" |
|
39
|
|
|
|
|
|
|
}, |
|
40
|
|
|
|
|
|
|
{ |
|
41
|
|
|
|
|
|
|
role => "user", |
|
42
|
|
|
|
|
|
|
content => "Tell me a story of two princesses, Judy and Emmy. Judy is 8 and Emmy is 2." |
|
43
|
|
|
|
|
|
|
} |
|
44
|
|
|
|
|
|
|
], |
|
45
|
|
|
|
|
|
|
|
|
46
|
|
|
|
|
|
|
|
|
47
|
|
|
|
|
|
|
|
|
48
|
|
|
|
|
|
|
max_tokens => 1024, |
|
49
|
|
|
|
|
|
|
})->get(); |
|
50
|
|
|
|
|
|
|
|
|
51
|
|
|
|
|
|
|
# $output is now an OpenAIAsync::Type::Response::ChatCompletion |
|
52
|
|
|
|
|
|
|
|
|
53
|
|
|
|
|
|
|
=head1 THEORY OF OPERATION |
|
54
|
|
|
|
|
|
|
|
|
55
|
|
|
|
|
|
|
This module implements the L<IO::Async::Notifier> interface, this means that you create a new client and then call C<< $loop->add($client) >> |
|
56
|
|
|
|
|
|
|
this causes all L<Future>s that are created to be part of the L<IO::Async::Loop> of your program. This way when you call C<await> on any method |
|
57
|
|
|
|
|
|
|
it will properly suspend the execution of your program and do something else concurrently (probably waiting on requests). |
|
58
|
|
|
|
|
|
|
|
|
59
|
|
|
|
|
|
|
=head1 Methods |
|
60
|
|
|
|
|
|
|
|
|
61
|
|
|
|
|
|
|
=head2 new() |
|
62
|
|
|
|
|
|
|
|
|
63
|
|
|
|
|
|
|
Create a new OpenAIAsync::Client. You'll need to register the client with C<< $loop->add($client) >> after creation. |
|
64
|
|
|
|
|
|
|
|
|
65
|
|
|
|
|
|
|
=head3 PARAMETERS |
|
66
|
|
|
|
|
|
|
|
|
67
|
|
|
|
|
|
|
=over 4 |
|
68
|
|
|
|
|
|
|
|
|
69
|
|
|
|
|
|
|
=item * api_base (optional) |
|
70
|
|
|
|
|
|
|
|
|
71
|
|
|
|
|
|
|
Base url of the service to connect to. Defaults to C<https://api.openai.com/v1>. This should be a value pointing to something that |
|
72
|
|
|
|
|
|
|
implements the v1 OpenAI API, which for OobaBooga's text-generation-webui might be something like C. |
|
73
|
|
|
|
|
|
|
|
|
74
|
|
|
|
|
|
|
It will also be pulled from the environment variable C<OPENAI_API_BASE> in the same fashion that the OpenAI libraries in other languages will do. |
|
75
|
|
|
|
|
|
|
|
|
76
|
|
|
|
|
|
|
=item * api_key (required) |
|
77
|
|
|
|
|
|
|
|
|
78
|
|
|
|
|
|
|
Api key that will be passed to the service you call. This gets passed as a header C<Authorization: Bearer ...> to the service in all of the REST |
|
79
|
|
|
|
|
|
|
calls. This should be kept secret as it can be used to make all kinds of calls to paid services. |
|
80
|
|
|
|
|
|
|
|
|
81
|
|
|
|
|
|
|
It will also be pulled from the environment variable C<OPENAI_API_KEY> in the same fashion that the OpenAI libraries in other languages will do. |
|
82
|
|
|
|
|
|
|
|
|
83
|
|
|
|
|
|
|
=item * api_org_name (optional) |
|
84
|
|
|
|
|
|
|
|
|
85
|
|
|
|
|
|
|
A name for the organization that's making the call. This can be used by OpenAI to help identify which part of your company is |
|
86
|
|
|
|
|
|
|
making any specific request, and I believe to help itemize billing and other tasks. |
|
87
|
|
|
|
|
|
|
|
|
88
|
|
|
|
|
|
|
=item * http_user_agent (optional) |
|
89
|
|
|
|
|
|
|
|
|
90
|
|
|
|
|
|
|
Set the useragent that's used to contact the API service. Defaults to |
|
91
|
|
|
|
|
|
|
|
|
92
|
|
|
|
|
|
|
C<< __PACKAGE__." Perl/$VERSION (Net::Async::HTTP/".$Net::Async::HTTP::VERSION." IO::Async/".$IO::Async::VERSION." Perl/$])" >> |
|
93
|
|
|
|
|
|
|
|
|
94
|
|
|
|
|
|
|
The default is to make it easier to debug if we ever see weird issues with the requests being generated but it does reveal some information |
|
95
|
|
|
|
|
|
|
about the code environment. |
|
96
|
|
|
|
|
|
|
|
|
97
|
|
|
|
|
|
|
=item * http_max_in_flight (optional) |
|
98
|
|
|
|
|
|
|
|
|
99
|
|
|
|
|
|
|
How many requests should we allow to happen at once. Increasing this will increase the allowed parallel requests, but that can also |
|
100
|
|
|
|
|
|
|
allow you to make too many requests and cost more in API calls. |
|
101
|
|
|
|
|
|
|
|
|
102
|
|
|
|
|
|
|
Defaults to 2 |
|
103
|
|
|
|
|
|
|
|
|
104
|
|
|
|
|
|
|
=item * http_max_connections_per_host (optional) |
|
105
|
|
|
|
|
|
|
|
|
106
|
|
|
|
|
|
|
TODO, I'm thinking this one will get dropped. Effectively since we're only ever connecting to one server this ends up functioning the same as the above parameter. |
|
107
|
|
|
|
|
|
|
|
|
108
|
|
|
|
|
|
|
Defaults to 2 |
|
109
|
|
|
|
|
|
|
|
|
110
|
|
|
|
|
|
|
=item * http_max_redirects (optional) |
|
111
|
|
|
|
|
|
|
|
|
112
|
|
|
|
|
|
|
How many redirects to allow. The official OpenAI API never sends redirects (for now) but for self hosted or other custom setups this might happen and should be handled correctly |
|
113
|
|
|
|
|
|
|
|
|
114
|
|
|
|
|
|
|
Defaults to 3 |
|
115
|
|
|
|
|
|
|
|
|
116
|
|
|
|
|
|
|
=item * http_timeout (optional) |
|
117
|
|
|
|
|
|
|
|
|
118
|
|
|
|
|
|
|
How long to wait on any given request to start. |
|
119
|
|
|
|
|
|
|
|
|
120
|
|
|
|
|
|
|
Defaults to 120 seconds. |
|
121
|
|
|
|
|
|
|
|
|
122
|
|
|
|
|
|
|
=item * http_stall_timeout (optional) |
|
123
|
|
|
|
|
|
|
|
|
124
|
|
|
|
|
|
|
How long to wait on any given request to decide if it's been stalled. If a request starts responding and then stops part way through, this is how we'll treat it as stalled and time it out |
|
125
|
|
|
|
|
|
|
|
|
126
|
|
|
|
|
|
|
Defaults to 600s (10 minutes). This is unlikely to happen except for a malfunctioning inference service since once generation starts to return it'll almost certainly finish. |
|
127
|
|
|
|
|
|
|
|
|
128
|
|
|
|
|
|
|
=item * http_other_options (optional) |
|
129
|
|
|
|
|
|
|
|
|
130
|
|
|
|
|
|
|
A hash ref that gets passed as additional parameters to L's constructor. All values will be overriden by the ones above, so if a parameter is supported use those first. |
|
131
|
|
|
|
|
|
|
|
|
132
|
|
|
|
|
|
|
=back |
|
133
|
|
|
|
|
|
|
|
|
134
|
|
|
|
|
|
|
=head2 completion (deprecated) |
|
135
|
|
|
|
|
|
|
|
|
136
|
|
|
|
|
|
|
Create a request for completion, this takes a prompt and returns a response. See L for exact details. |
|
137
|
|
|
|
|
|
|
|
|
138
|
|
|
|
|
|
|
This particular API has been deprecated by OpenAI in favor of doing everything through the chat completion api below. However it is still supported |
|
139
|
|
|
|
|
|
|
by OpenAI and compatible servers as it's a very simple interface to use |
|
140
|
|
|
|
|
|
|
|
|
141
|
|
|
|
|
|
|
=head2 chat |
|
142
|
|
|
|
|
|
|
|
|
143
|
|
|
|
|
|
|
Create a request for the chat completion api. This takes a series of messages and returns a new chat response. See L for exact details. |
|
144
|
|
|
|
|
|
|
|
|
145
|
|
|
|
|
|
|
This API takes a series of messages from different agent sources and then responds as the assistant agent. A typical interaction is to start with a C<"system"> agent message |
|
146
|
|
|
|
|
|
|
to set the context for the assistant, followed by the C<"user"> agent type for the user's request. You'll then get the response from the assistant agent to give to the user. |
|
147
|
|
|
|
|
|
|
|
|
148
|
|
|
|
|
|
|
To continue the chat, you'd then take the new message and insert it into the list of messages as part of the chat and make a new request with the user's response. I'll be creating |
|
149
|
|
|
|
|
|
|
a new module that uses this API and helps manage the chat in an easier manner with a few helper functions. |
|
150
|
|
|
|
|
|
|
|
|
151
|
|
|
|
|
|
|
=head2 embedding |
|
152
|
|
|
|
|
|
|
|
|
153
|
|
|
|
|
|
|
Create a request for calculating the embedding of an input. This takes a bit of text and returns a gigantic list of numbers, see L for exact details. |
|
154
|
|
|
|
|
|
|
|
|
155
|
|
|
|
|
|
|
These values are a bit difficult to explain how they work, but essentially you get a mathematical object, a vector, that describes the contents of the input as |
|
156
|
|
|
|
|
|
|
a point in an N-dimensional space (typically 768 or 1536 dimensions). The dimensions themselves really don't have any inherit mathematical meaning but are instead relative to one-another |
|
157
|
|
|
|
|
|
|
from the training data of the embedding model. |
|
158
|
|
|
|
|
|
|
|
|
159
|
|
|
|
|
|
|
You'll want to take the vector and store it in a database that supports vector operations, like PostgreSQL with the L extension. |
|
160
|
|
|
|
|
|
|
|
|
161
|
|
|
|
|
|
|
=head2 image_generate |
|
162
|
|
|
|
|
|
|
|
|
163
|
|
|
|
|
|
|
Unimplemented, but once present will be used to generate images with Dall-E (or for self hosted, stable diffusion). |
|
164
|
|
|
|
|
|
|
|
|
165
|
|
|
|
|
|
|
=head2 text_to_speech |
|
166
|
|
|
|
|
|
|
|
|
167
|
|
|
|
|
|
|
Unimplemented, but can be used to turn text to speech using whatever algorithms/models are supported. |
|
168
|
|
|
|
|
|
|
|
|
169
|
|
|
|
|
|
|
=head2 speech_to_text |
|
170
|
|
|
|
|
|
|
|
|
171
|
|
|
|
|
|
|
Unimplemented. The opposite of the above. |
|
172
|
|
|
|
|
|
|
|
|
173
|
|
|
|
|
|
|
=head2 vision |
|
174
|
|
|
|
|
|
|
|
|
175
|
|
|
|
|
|
|
Unimplemented, I've not investigated this one much yet but I believe it's to get a description of an image and it's contents. |
|
176
|
|
|
|
|
|
|
|
|
177
|
|
|
|
|
|
|
=head2 Missing apis |
|
178
|
|
|
|
|
|
|
|
|
179
|
|
|
|
|
|
|
At least some for getting the list of models and some other meta information, those will be added next after I get some more documentation written |
|
180
|
|
|
|
|
|
|
|
|
181
|
|
|
|
|
|
|
=head1 See Also |
|
182
|
|
|
|
|
|
|
|
|
183
|
|
|
|
|
|
|
L, L, L |
|
184
|
|
|
|
|
|
|
|
|
185
|
|
|
|
|
|
|
=head1 License |
|
186
|
|
|
|
|
|
|
|
|
187
|
|
|
|
|
|
|
Artistic 2.0 |
|
188
|
|
|
|
|
|
|
|
|
189
|
|
|
|
|
|
|
=head1 Author |
|
190
|
|
|
|
|
|
|
|
|
191
|
|
|
|
|
|
|
Ryan Voots, ... etc. |
|
192
|
|
|
|
|
|
|
|
|
193
|
|
|
|
|
|
|
=cut |
|
194
|
|
|
|
|
|
|
|
|
195
|
|
|
|
|
|
|
class OpenAIAsync::Client :repr(HASH) :isa(IO::Async::Notifier) :strict(params) { |
|
196
|
2
|
|
|
2
|
|
1711
|
use JSON::MaybeXS qw//; |
|
|
2
|
|
|
|
|
21347
|
|
|
|
2
|
|
|
|
|
201
|
|
|
197
|
2
|
|
|
2
|
|
1707
|
use Net::Async::HTTP; |
|
|
2
|
|
|
|
|
294903
|
|
|
|
2
|
|
|
|
|
170
|
|
|
198
|
2
|
|
|
2
|
|
1429
|
use Feature::Compat::Try; |
|
|
2
|
|
|
|
|
1155
|
|
|
|
2
|
|
|
|
|
13
|
|
|
199
|
2
|
|
|
2
|
|
242
|
use URI; |
|
|
2
|
|
|
|
|
6
|
|
|
|
2
|
|
|
|
|
10935
|
|
|
200
|
|
|
|
|
|
|
|
|
201
|
|
|
|
|
|
|
# JSON codec shared by all requests; convert_blessed lets the request
# objects serialize themselves via TO_JSON.
field $_json = JSON::MaybeXS->new(utf8 => 1, convert_blessed => 1);
field $http;  # Net::Async::HTTP instance, built by __make_http()

# TODO document these directly, other options gets mixed in BEFORE all of these
field $_http_max_in_flight            :param(http_max_in_flight) = 2;
field $_http_max_redirects            :param(http_max_redirects) = 3;
field $_http_max_connections_per_host :param(http_max_connections_per_host) = 2;
field $_http_timeout                  :param(http_timeout) = 120;       # My personal server is kinda slow, use a generous default
field $_http_stall_timeout            :param(http_stall_timeout) = 600; # generous for my slow personal server
field $_http_other                    :param(http_other_options) = {};
field $_http_user_agent               :param(http_user_agent) = __PACKAGE__." Perl/$VERSION (Net::Async::HTTP/".$Net::Async::HTTP::VERSION." IO::Async/".$IO::Async::VERSION." Perl/$])";

# Service endpoint and credentials; both fall back to the conventional
# environment variables used by OpenAI client libraries in other languages.
field $api_base :param(api_base) = $ENV{OPENAI_API_BASE} // "https://api.openai.com/v1";
field $api_key  :param(api_key)  = $ENV{OPENAI_API_KEY};

field $api_org_name :param(api_org_name) = undef;

# Accepted at construction time; consumed by configure() so notifier
# parameters never collide with this class's own :param fields.
field $io_async_notifier_params :param = undef;
|
219
|
|
|
|
|
|
|
|
|
220
|
4
|
|
|
4
|
1
|
333181
|
method configure(%params) {
    # Forward only the explicitly namespaced notifier parameters to
    # IO::Async::Notifier.  We require them to go through
    # io_async_notifier_params so they can never conflict with our own
    # :param fields.
    # TODO document this
    my %notifier_params = %{ $params{io_async_notifier_params} // {} };

    IO::Async::Notifier::configure($self, %notifier_params);
}
|
226
|
|
|
|
|
|
|
|
|
227
|
4
|
|
|
4
|
|
5
|
method __make_http() {
    # Build the Net::Async::HTTP instance used for all requests.  It is NOT
    # added to the event loop here; that happens in _add_to_loop().
    #
    # Raises: dies if no api_key was given (param or OPENAI_API_KEY env var).
    die "Missing API Key for OpenAI" unless $api_key;

    # $_http_other is expanded first so that every explicit option below
    # overrides it, exactly as the constructor POD promises.
    return Net::Async::HTTP->new(
        $_http_other->%*,
        # BUGFIX: previously hard-coded to "SNN OpenAI Client 1.0", which
        # silently ignored the http_user_agent param documented in the POD.
        user_agent => $_http_user_agent,
        +headers => {
            "Authorization" => "Bearer $api_key",
            "Content-Type" => "application/json",
            $api_org_name ? (
                'OpenAI-Organization' => $api_org_name,
            ) : ()
        },
        max_redirects => $_http_max_redirects,
        max_connections_per_host => $_http_max_connections_per_host,
        max_in_flight => $_http_max_in_flight,
        timeout => $_http_timeout,
        stall_timeout => $_http_stall_timeout,
    )
}
|
247
|
|
|
|
|
|
|
|
|
248
|
|
|
|
|
|
|
ADJUST {
    # Construct the HTTP client as soon as the object itself is built.
    $http = $self->__make_http;

    # Trim an accidental trailing slash, since every endpoint we append
    # already begins with one.
    $api_base =~ s{/$}{};
}
|
253
|
|
|
|
|
|
|
|
|
254
|
0
|
|
|
0
|
|
|
async method _make_request($endpoint, $data) {
    # Encode $data as JSON, POST it to $api_base.$endpoint, and return the
    # decoded response body as a plain hashref.  Dies on any non-success
    # HTTP status, including the status line and body in the message.
    my $request_body = $_json->encode($data);

    my $uri = URI->new($api_base . $endpoint);

    my $response = await $http->do_request(
        uri          => $uri,
        method       => "POST",
        content      => $request_body,
        content_type => 'application/json',
    );

    die "Failure in talking to OpenAI service: ".$response->status_line.": ".$response->decoded_content
        unless $response->is_success;

    return $_json->decode($response->decoded_content);
}
|
275
|
|
|
|
|
|
|
|
|
276
|
0
|
|
|
0
|
|
|
method _add_to_loop($loop) {
    # IO::Async::Notifier hook: when this client joins a loop, the internal
    # HTTP client must join it too so requests can actually run.
    $loop->add($http);
}
|
279
|
|
|
|
|
|
|
|
|
280
|
0
|
|
|
0
|
|
|
method _remove_from_loop($loop) {
    # IO::Async::Notifier hook: detach the HTTP client from the loop and
    # replace it with a fresh instance so no stale connection state can
    # leak across loop membership changes.
    $loop->remove($http);
    $http = $self->__make_http; # overkill? want to make sure we have a clean one
}
|
284
|
|
|
|
|
|
|
|
|
285
|
|
|
|
|
|
|
# This is the legacy completion api |
|
286
|
0
|
|
|
0
|
1
|
|
async method completion($input) {
    # Legacy /completions endpoint.  Accepts either a plain hashref of
    # request parameters or an already constructed request object.
    #
    # Returns: an OpenAIAsync::Types::Results::Completion.
    # Raises: dies on an unsupported input type or a failed HTTP request.

    if (ref($input) eq 'HASH') {
        $input = OpenAIAsync::Types::Requests::Completion->new($input->%*);
    } elsif ($input isa OpenAIAsync::Types::Requests::Completion) {
        # Already the right type; nothing to do.  The isa operator (enabled
        # by use v5.36) also accepts subclasses, which the old
        # ref($input) eq '...' string comparison wrongly rejected.
    } else {
        die "Unsupported input type [".ref($input)."]";
    }

    my $data = await $self->_make_request($input->_endpoint(), $input);

    my $type_result = OpenAIAsync::Types::Results::Completion->new($data->%*);

    return $type_result;
}
|
302
|
|
|
|
|
|
|
|
|
303
|
0
|
|
|
0
|
1
|
|
async method chat($input) {
    # Chat completion endpoint.  Accepts either a plain hashref of request
    # parameters or an already constructed request object.
    #
    # Returns: an OpenAIAsync::Types::Results::ChatCompletion.
    # Raises: dies on an unsupported input type or a failed HTTP request.

    if (ref($input) eq 'HASH') {
        $input = OpenAIAsync::Types::Requests::ChatCompletion->new($input->%*);
    } elsif ($input isa OpenAIAsync::Types::Requests::ChatCompletion) {
        # Already the right type; nothing to do.  The isa operator (enabled
        # by use v5.36) also accepts subclasses, which the old
        # ref($input) eq '...' string comparison wrongly rejected.
    } else {
        die "Unsupported input type [".ref($input)."]";
    }

    my $data = await $self->_make_request($input->_endpoint(), $input);

    my $type_result = OpenAIAsync::Types::Results::ChatCompletion->new($data->%*);

    return $type_result;
}
|
318
|
|
|
|
|
|
|
|
|
319
|
0
|
|
|
0
|
1
|
|
async method embedding($input) {
    # Embedding endpoint.  Accepts either a plain hashref of request
    # parameters or an already constructed request object.
    #
    # Returns: an OpenAIAsync::Types::Results::Embedding.
    # Raises: dies on an unsupported input type or a failed HTTP request.

    if (ref($input) eq 'HASH') {
        $input = OpenAIAsync::Types::Requests::Embedding->new($input->%*);
    } elsif ($input isa OpenAIAsync::Types::Requests::Embedding) {
        # Already the right type; nothing to do.  The isa operator (enabled
        # by use v5.36) also accepts subclasses, which the old
        # ref($input) eq '...' string comparison wrongly rejected.
    } else {
        die "Unsupported input type [".ref($input)."]";
    }

    my $data = await $self->_make_request($input->_endpoint(), $input);

    my $type_result = OpenAIAsync::Types::Results::Embedding->new($data->%*);

    return $type_result;
}
|
334
|
|
|
|
|
|
|
|
|
335
|
0
|
|
|
0
|
1
|
|
async method image_generate($input) {
    # Unimplemented: reserved for image generation (Dall-E, or Stable
    # Diffusion on self-hosted setups).  Dies with "Unimplemented" if called.
    ...
}
|
338
|
|
|
|
|
|
|
|
|
339
|
0
|
|
|
0
|
1
|
|
async method text_to_speech($text) {
    # Unimplemented: reserved for text-to-speech synthesis.  Dies with
    # "Unimplemented" if called.
    ...
}
|
342
|
|
|
|
|
|
|
|
|
343
|
0
|
|
|
0
|
1
|
|
async method speech_to_text($sound_data) {
    # Unimplemented: reserved for audio transcription (the reverse of
    # text_to_speech).  Dies with "Unimplemented" if called.
    ...
}
|
346
|
|
|
|
|
|
|
|
|
347
|
0
|
|
|
0
|
1
|
|
async method vision($image, $prompt) {
    # Unimplemented: reserved for image-understanding requests (describing
    # an image's contents).  Dies with "Unimplemented" if called.
    ...
}
|
350
|
|
|
|
|
|
|
} |