File Coverage

blib/lib/AI/Ollama/GenerateChatCompletionRequest.pm
Criterion Covered Total %
statement 20 24 83.3
branch n/a
condition n/a
subroutine 7 8 87.5
pod 0 1 0.0
total 27 33 81.8


line stmt bran cond sub pod time code
1             package AI::Ollama::GenerateChatCompletionRequest 0.05;
2             # DO NOT EDIT! This is an autogenerated file.
3              
4 1     1   20 use 5.020;
  1         5  
5 1     1   7 use Moo 2;
  1         33  
  1         8  
6 1     1   435 use experimental 'signatures';
  1         2  
  1         6  
7 1     1   239 use stable 'postderef';
  1         3  
  1         7  
8 1     1   121 use Types::Standard qw(Enum Str Bool Num Int HashRef ArrayRef);
  1         3  
  1         9  
9 1     1   2982 use MooX::TypeTiny;
  1         3  
  1         8  
10              
11 1     1   1103 use namespace::clean;
  1         4  
  1         7  
12              
13             =encoding utf8
14              
15             =head1 NAME
16              
17             AI::Ollama::GenerateChatCompletionRequest -
18              
19             =head1 SYNOPSIS
20              
21             my $obj = AI::Ollama::GenerateChatCompletionRequest->new();
22             ...
23              
24             =cut
25              
26 0     0 0   sub as_hash( $self ) {
  0            
  0            
27 0           return { $self->%* }
28             }
29              
30             =head1 PROPERTIES
31              
32             =head2 C<< format >>
33              
34             The format to return a response in. Currently the only accepted value is json.
35              
36             Enable JSON mode by setting the format parameter to json. This will structure the response as valid JSON.
37              
38             Note: it's important to instruct the model to use JSON in the prompt. Otherwise, the model may generate large amounts of whitespace.
39              
40             =cut
41              
42             has 'format' => (
43             is => 'ro',
44             isa => Enum[
45             "json",
46             ],
47             );
48              
49             =head2 C<< keep_alive >>
50              
51             How long (in minutes) to keep the model loaded in memory.
52              
53             - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.
54             - If set to a negative duration (e.g. -1), the model will stay loaded indefinitely.
55             - If set to 0, the model will be unloaded immediately once finished.
56             - If not set, the model will stay loaded for 5 minutes by default
57              
58             =cut
59              
60             has 'keep_alive' => (
61             is => 'ro',
62             isa => Int,
63             );
64              
65             =head2 C<< messages >>
66              
67             The messages of the chat, this can be used to keep a chat memory
68              
69             =cut
70              
71             has 'messages' => (
72             is => 'ro',
73             isa => ArrayRef[HashRef],
74             required => 1,
75             );
76              
77             =head2 C<< model >>
78              
79             The model name.
80              
81             Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.
82              
83             =cut
84              
85             has 'model' => (
86             is => 'ro',
87             isa => Str,
88             required => 1,
89             );
90              
91             =head2 C<< options >>
92              
93             Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
94              
95             =cut
96              
97             has 'options' => (
98             is => 'ro',
99             isa => HashRef,
100             );
101              
102             =head2 C<< stream >>
103              
104             If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.
105              
106             =cut
107              
108             has 'stream' => (
109             is => 'ro',
110             );
111              
112              
113             1;