package OpenAI::API::Request::Completion;

use strict;
use warnings;

use Moo;
use strictures 2;
use namespace::clean;

extends 'OpenAI::API::Request';

use Types::Standard qw(Any Bool Int Map Num Str);
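
# Request parameters, mirroring the OpenAI completions endpoint:
# model and prompt are required; the remaining attributes are optional.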

has model  => ( is => 'rw', isa => Str, required => 1, );
has prompt => ( is => 'rw', isa => Str, required => 1, );

has suffix            => ( is => 'rw', isa => Str, );
has max_tokens        => ( is => 'rw', isa => Int, );
has temperature       => ( is => 'rw', isa => Num, );
has top_p             => ( is => 'rw', isa => Num, );
has n                 => ( is => 'rw', isa => Int, );
has stream            => ( is => 'rw', isa => Bool, );
has logprobs          => ( is => 'rw', isa => Int, );
has echo              => ( is => 'rw', isa => Bool, );
has stop              => ( is => 'rw', isa => Any, );
has presence_penalty  => ( is => 'rw', isa => Num, );
has frequency_penalty => ( is => 'rw', isa => Num, );
has best_of           => ( is => 'rw', isa => Int, );
has logit_bias        => ( is => 'rw', isa => Map [ Int, Int ], );
has user              => ( is => 'rw', isa => Str, );

sub endpoint { 'completions' }
sub method   { 'POST' }

1;

__END__

=head1 NAME

OpenAI::API::Request::Completion - completions endpoint

=head1 SYNOPSIS

    use OpenAI::API::Request::Completion;

    my $request = OpenAI::API::Request::Completion->new(
        model       => "text-davinci-003",
        prompt      => "Say this is a test",
        max_tokens  => 10,
        temperature => 0,
    );

    my $res = $request->send();    # or: $request->send( http_response => 1 );

    my $text = $res->{choices}[0]{text};

    # or, via the response object's string overload:
    #print "$res\n";

=head1 DESCRIPTION

Given a prompt, the model will return one or more predicted completions.

=head1 METHODS

=head2 new()

=over 4

=item * model

ID of the model to use.

See the L<Models overview|https://platform.openai.com/docs/models/overview>
for a list of available models.

=item * prompt

The prompt for the text generation.

=item * suffix [optional]

The suffix that comes after a completion of inserted text.

=item * max_tokens [optional]

The maximum number of tokens to generate.

Most models have a context length of 2048 tokens (except for the newest
models, which support 4096).

=item * temperature [optional]

What sampling temperature to use, between 0 and 2. Higher values like
0.8 will make the output more random, while lower values like 0.2 will
make it more focused and deterministic.

=item * top_p [optional]

An alternative to sampling with temperature, called nucleus sampling.

We generally recommend altering this or C<temperature> but not both.

=item * n [optional]

How many completions to generate for each prompt.

Use carefully and ensure that you have reasonable settings for
C<max_tokens> and C<stop>.

=item * stop [optional]

Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence. See the example after
this list.

=item * frequency_penalty [optional]

Number between -2.0 and 2.0. Positive values penalize new tokens based
on their existing frequency in the text so far.

=item * presence_penalty [optional]

Number between -2.0 and 2.0. Positive values penalize new tokens based
on whether they appear in the text so far.

=item * best_of [optional]

Generates C<best_of> completions server-side and returns the "best" (the
one with the highest log probability per token).

Use carefully and ensure that you have reasonable settings for
C<max_tokens> and C<stop>.

=back
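
For example, a request that combines several of the optional parameters
above (the parameter values are illustrative, not recommendations; since
C<stop> is typed C<Any>, it may be given a single string or an arrayref
of sequences):

    my $request = OpenAI::API::Request::Completion->new(
        model       => "text-davinci-003",
        prompt      => "Suggest a name for a coffee shop.",
        max_tokens  => 16,
        temperature => 0.9,
        n           => 3,          # return three alternatives
        best_of     => 5,          # sample five, keep the best three
        stop        => ["\n"],     # stop at the first newline
    );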

=head2 send()

Sends the request and returns a data structure similar to the one
documented in the API reference.
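
For a completion request, the returned hashref contains a C<choices>
array holding the generated completions, as shown in the SYNOPSIS:

    my $res = $request->send();

    for my $choice ( @{ $res->{choices} } ) {
        print $choice->{text}, "\n";
    }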

=head2 send_async()

Sends a request asynchronously. Returns a L<future|IO::Async::Future>
that will be resolved with the decoded JSON response. See
L<OpenAI::API::Request> for an example.
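
A minimal sketch, blocking on the future for simplicity (inside a
running L<IO::Async> loop you would chain callbacks with C<on_done>
instead of calling C<get>):

    my $future = $request->send_async();

    my $res = $future->get();    # wait for the response
    print $res->{choices}[0]{text}, "\n";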

=head1 SEE ALSO

OpenAI API Reference: L<Completions|https://platform.openai.com/docs/api-reference/completions>