package OpenAI::API::Request::Completion;

use strict;
use warnings;

use Moo;
use strictures 2;
use namespace::clean;

extends 'OpenAI::API::Request';

use Types::Standard qw(Any Bool Int Map Num Str);
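
# Request parameters recognized by the completions endpoint;
# model and prompt are required, the rest are optional.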

has model  => ( is => 'rw', isa => Str, required => 1, );
has prompt => ( is => 'rw', isa => Str, required => 1, );

has suffix            => ( is => 'rw', isa => Str, );
has max_tokens        => ( is => 'rw', isa => Int, );
has temperature       => ( is => 'rw', isa => Num, );
has top_p             => ( is => 'rw', isa => Num, );
has n                 => ( is => 'rw', isa => Int, );
has stream            => ( is => 'rw', isa => Bool, );
has logprobs          => ( is => 'rw', isa => Int, );
has echo              => ( is => 'rw', isa => Bool, );
has stop              => ( is => 'rw', isa => Any, );
has presence_penalty  => ( is => 'rw', isa => Num, );
has frequency_penalty => ( is => 'rw', isa => Num, );
has best_of           => ( is => 'rw', isa => Int, );
has logit_bias        => ( is => 'rw', isa => Map[ Int, Int ], );
has user              => ( is => 'rw', isa => Str, );

sub endpoint { 'completions' }
sub method   { 'POST' }

1;

__END__

=head1 NAME

OpenAI::API::Request::Completion - Request class for OpenAI API text completion

=head1 SYNOPSIS

    use OpenAI::API::Request::Completion;

    my $completion = OpenAI::API::Request::Completion->new(
        model      => 'text-davinci-003',
        prompt     => 'Once upon a time',
        max_tokens => 50,
    );

    my $res  = $completion->send();    # or: my $res = $completion->send( http_response => 1 );
    my $text = $res->{choices}[0]{text};    # or: my $text = "$res";

=head1 DESCRIPTION

This module provides a request class for interacting with the OpenAI
API's text completion endpoint. It inherits from L<OpenAI::API::Request>.

=head1 ATTRIBUTES

=head2 model

ID of the model to use.

See the L<models overview|https://platform.openai.com/docs/models/overview>
for a list of available models.

=head2 prompt

The prompt for the text generation.

=head2 suffix [optional]

The suffix that comes after a completion of inserted text.

=head2 max_tokens [optional]

The maximum number of tokens to generate.

Most models have a context length of 2048 tokens (except for the newest
models, which support 4096).

=head2 temperature [optional]

What sampling temperature to use, between 0 and 2. Higher values like
0.8 will make the output more random, while lower values like 0.2 will
make it more focused and deterministic.
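
Because the attribute is declared read-write, it can also be adjusted
after the request object has been constructed:

    # lower values make the output more focused and deterministic
    $completion->temperature(0.2);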

=head2 top_p [optional]

An alternative to sampling with temperature, called nucleus sampling.

We generally recommend altering this or C<temperature> but not both.

=head2 n [optional]

How many completions to generate for each prompt.

Use carefully and ensure that you have reasonable settings for
C<max_tokens> and C<stop>.
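
For example, a minimal sketch that requests three completions and prints
each one, relying on the response structure shown in the SYNOPSIS:

    my $req = OpenAI::API::Request::Completion->new(
        model      => 'text-davinci-003',
        prompt     => 'Write a tagline for a bakery',
        n          => 3,
        max_tokens => 16,
    );

    my $res = $req->send();
    for my $choice ( @{ $res->{choices} } ) {
        print $choice->{text}, "\n";
    }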

=head2 stop [optional]

Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
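
The attribute is typed C<Any>, so both a single string and an array
reference of up to 4 sequences should validate:

    # a single stop sequence
    $completion->stop("\n");

    # or several stop sequences as an array reference
    $completion->stop( [ "\n", 'Q:' ] );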

=head2 frequency_penalty [optional]

Number between -2.0 and 2.0. Positive values penalize new tokens based
on their existing frequency in the text so far.

=head2 presence_penalty [optional]

Number between -2.0 and 2.0. Positive values penalize new tokens based
on whether they appear in the text so far.

=head2 best_of [optional]

Generates C<best_of> completions server-side and returns the "best" (the
one with the highest log probability per token).

Use carefully and ensure that you have reasonable settings for
C<max_tokens> and C<stop>.

=head1 METHODS

=head2 endpoint()

Returns the API endpoint for this request class: C<completions>.

=head2 method()

Returns the HTTP method used by this request class: C<POST>.

=head1 INHERITED METHODS

This module inherits the following methods from L<OpenAI::API::Request>:

=head2 send(%args)
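
As shown in the SYNOPSIS, C<send> returns the decoded API response by
default, and also accepts an C<< http_response => 1 >> option
(presumably returning the underlying HTTP response object instead):

    my $res = $completion->send();
    my $raw = $completion->send( http_response => 1 );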

=head2 send_async(%args)
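
A hypothetical usage sketch, assuming C<send_async> returns a
future/promise-style object; see L<OpenAI::API::Request> for the exact
interface:

    # hypothetical: block until the asynchronous result is available
    my $future = $completion->send_async();
    my $res    = $future->get();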

=head1 SEE ALSO

L<OpenAI::API::Request>, L<OpenAI::API::Config>