
Commit 8e703d0

feat: add raw query option and allow other customisations
1 parent 2db7217

File tree: 4 files changed, +85 -7 lines

- README.md
- src/llm-cli.cr
- src/llm-cli/chat.cr
- src/llm-cli/chat/gpt.cr

README.md

Lines changed: 14 additions & 1 deletion
@@ -24,7 +24,10 @@ You'll then need to configure a LLM, currently only OpenAPI is implemented. You
 ```shell
 vi ~/.bashrc
 # then add your OpenAPI Key
-# export OPENAI_API_KEY=sk-123456
+export OPENAI_API_KEY=sk-123456
+
+# optionally you can specify your preferred model
+export LLM_MODEL=gpt-3.5-turbo
 ```
 
 ## Usage
@@ -37,6 +40,16 @@ llm the description of the command you want to run
 llm I want to do this then do this. Then do this
 ```
 
+Execute a query and have the response returned on the command line
+
+```shell
+llm -q are there any warm blooded reptiles?
+```
+
+You can specify your model preference using `-m gpt-3.5-turbo` (it will default to `gpt-4` if you have API access to it)
+
+If you would like verbose details, use the `-v` flag
+
 ## Contributing
 
 1. Fork it (<https://github.com/stakach/llm-cli/fork>)
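
The three new flags can be combined in a single invocation. A hypothetical example (the model name and question below are illustrative, not taken from this commit):

```shell
# one-off question with a model preference and verbose request/response output
llm -v -m gpt-3.5-turbo -q how do I list open ports on linux?
```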

src/llm-cli.cr

Lines changed: 65 additions & 5 deletions
@@ -1,8 +1,37 @@
 require "spinner"
 require "colorize"
 require "./llm-cli/*"
+require "option_parser"
+
+# check for any runtime flags
+args = ARGV.dup
+verbose = false
+is_query = false
+model = ENV["LLM_MODEL"]? || ""
+
+# Command line options
+OptionParser.parse(args) do |parser|
+  parser.on("-m MODEL", "--model=MODEL", "specify a LLM model to use") do |llm_model|
+    model = llm_model
+  end
+
+  parser.on("-q", "--query", "Just ask a question of the LLM and return the response") do
+    is_query = true
+  end
+
+  parser.on("-v", "--verbose", "Output all the request and response data") do
+    verbose = true
+  end
+
+  parser.on("-h", "--help", "Show this help") do
+    puts parser
+    exit 0
+  end
+end
 
 # init the service
+shell = LLM::CLI::Shell.new
+spin = Spin.new
 chat = begin
   LLM::CLI::Chat.service
 rescue error : LLM::CLI::Chat::Error
@@ -16,30 +45,61 @@ rescue error : LLM::CLI::Chat::Error
   end
   exit 1
 end
-shell = LLM::CLI::Shell.new
+
+chat.model_preference = model
+puts "> Model Selected: #{chat.model_id}".colorize(:red) if verbose
+
+# we want to ask some questions via the command line
+if is_query && (question = args.join(" ").presence)
+  begin
+    messages = [LLM::CLI::Chat::Message.new(LLM::CLI::Chat::Role::User, question)]
+    loop do
+      spin.start
+      response = chat.chat messages
+      messages << response
+      spin.stop
+
+      puts "\n#{response.content}\n".colorize(:green)
+
+      question = shell.get_input("reply? ").strip
+      exit 0 unless question.presence
+      messages << LLM::CLI::Chat::Message.new(LLM::CLI::Chat::Role::User, question)
+    end
+  rescue error
+    spin.stop
+    puts error.inspect_with_backtrace.colorize(:red)
+    exit 2
+  end
+end
+
 prompt = LLM::CLI::Prompt.new({
   "No user assistance, command ordering is important",
   "You are running on #{shell.operating_system}",
-  "Commands to be executed on the following shell: #{File.basename shell.selected}",
+  "The current shell is: #{File.basename shell.selected}",
   "Wrap unknown command parameters in <brackets>",
   "you might need to change directory before executing subsequent commands",
+  "a single command might solve multiple goals, be creative",
 })
 
 # grab the users request
-request = ARGV.dup.join(" ").presence
+request = args.join(" ").presence
 unless request
   puts "No command description provided".colorize(:red)
   exit 1
 end
 prompt.add_goal request
 
-spin = Spin.new
+prompt_message = prompt.generate
+puts "> Requesting:\n#{prompt_message}\n".colorize(:red) if verbose
+
 begin
   spin.start
   # query the configured LLM
-  response = chat.send prompt.generate
+  response = chat.send prompt_message
   spin.stop
 
+  puts "> Raw response:\n#{response}\n".colorize(:red) if verbose
+
   # process the response
   begin
     cmds = LLM::CLI::Prompt::Response.from_json response
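
A note on the `args` handling above: Crystal's stdlib `OptionParser.parse` removes the flags it recognises from the array it is given, so the positional words left behind can be joined into the question or command description. A minimal self-contained sketch of that behaviour (the sample argument list is invented):

```crystal
require "option_parser"

# OptionParser.parse mutates the array it is given, consuming
# recognised flags and leaving positional words behind.
args = ["-v", "list", "all", "open", "ports"] # invented sample input
verbose = false

OptionParser.parse(args) do |parser|
  parser.on("-v", "--verbose", "Output all the request and response data") { verbose = true }
end

puts verbose        # => true
puts args.join(" ") # => "list all open ports"
```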

src/llm-cli/chat.cr

Lines changed: 3 additions & 0 deletions
@@ -28,6 +28,9 @@ abstract class LLM::CLI::Chat
     help: String)
 
   abstract def chat(message : Array(Message)) : Message
+  abstract def model_id : String
+
+  property model_preference : String = ""
 
   def send(message : String) : String
     chat([Message.new(:user, message)]).content
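
For illustration, this is the contract a concrete service must now satisfy: implement `model_id` as well as `chat`, with `model_preference` inherited from the base class. The subclass below is invented, not part of the repo, and assumes `Role::Assistant` exists alongside the `Role::User` seen above and that these are the only abstract methods:

```crystal
# Hypothetical service, for illustration only.
class LLM::CLI::EchoChat < LLM::CLI::Chat
  # satisfies the new abstract `model_id`
  getter model_id : String = "echo-1"

  # satisfies `chat` by echoing the last user message back;
  # assumes a Role::Assistant enum member exists
  def chat(message : Array(Message)) : Message
    Message.new(Role::Assistant, message.last.content)
  end
end
```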

src/llm-cli/chat/gpt.cr

Lines changed: 3 additions & 1 deletion
@@ -42,10 +42,12 @@ class LLM::CLI::OpenAI::GPT < LLM::CLI::Chat
   getter model_id : String do
     response = client.get("/v1/models")
     models = List(Model).from_json(response.body).data.map(&.id)
-    preferred = {"gpt-4", "gpt-3.5-turbo"}
+    preferred = {model_preference, "gpt-4", "gpt-3.5-turbo"}
 
     found = preferred.last
     preferred.each do |model|
+      next unless model.presence
+
       if models.includes?(model)
         found = model
         break
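
Read in isolation, the scan tries the user's preference first, skips it when blank, and falls back to the last entry. A standalone sketch with an invented model list:

```crystal
# Invented model list, standing in for the /v1/models response.
models    = ["gpt-3.5-turbo", "text-davinci-003"]
preferred = {"", "gpt-4", "gpt-3.5-turbo"} # "" = no -m flag or LLM_MODEL set

found = preferred.last
preferred.each do |model|
  next unless model.presence # skip a blank preference
  if models.includes?(model)
    found = model
    break
  end
end

puts found # => "gpt-3.5-turbo"
```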
