require "spinner"
require "colorize"
require "./llm-cli/*"
require "option_parser"

# Runtime flags. We parse a working copy of ARGV so that whatever
# positional words survive option parsing can be reused later as the
# request text.
args = ARGV.dup
verbose = false
is_query = false
# Model preference: honour the LLM_MODEL env var, overridable with -m.
# NOTE(review): the default was ambiguous in the mangled source ("" vs " ");
# "" (no preference) assumed — confirm against Chat#model_preference=.
model = ENV["LLM_MODEL"]? || ""
11+
# Command line options. Recognised flags are consumed from `args`;
# anything left over is treated as the request / question text.
OptionParser.parse(args) do |parser|
  parser.on("-m MODEL", "--model=MODEL", "specify a LLM model to use") do |llm_model|
    model = llm_model
  end

  parser.on("-q", "--query", "Just ask a question of the LLM and return the response") do
    is_query = true
  end

  parser.on("-v", "--verbose", "Output all the request and response data") do
    verbose = true
  end

  parser.on("-h", "--help", "Show this help") do
    puts parser
    exit 0
  end
end
431
# init the service helpers: shell access for user input / environment
# introspection, and a terminal spinner shown while the LLM is working.
shell = LLM::CLI::Shell.new
spin = Spin.new
635chat = begin
736 LLM ::CLI ::Chat .service
837rescue error : LLM ::CLI ::Chat ::Error
@@ -16,30 +45,61 @@ rescue error : LLM::CLI::Chat::Error
1645 end
1746 exit 1
1847end
19- shell = LLM ::CLI ::Shell .new

# Apply the user's model choice (may be "" meaning "no preference").
chat.model_preference = model
# Trace which model was actually resolved when -v / --verbose is set.
puts "> Model Selected: #{chat.model_id}".colorize(:red) if verbose
51+
# Query mode (-q): treat the remaining CLI words as a question and run a
# simple interactive chat loop until the user replies with a blank line.
if is_query && (question = args.join(" ").presence)
  begin
    messages = [LLM::CLI::Chat::Message.new(LLM::CLI::Chat::Role::User, question)]
    loop do
      spin.start
      response = chat.chat messages
      messages << response
      spin.stop

      puts "\n#{response.content}\n".colorize(:green)

      # An empty reply ends the conversation successfully.
      question = shell.get_input("reply? ").strip
      exit 0 unless question.presence
      messages << LLM::CLI::Chat::Message.new(LLM::CLI::Chat::Role::User, question)
    end
  rescue error
    # Stop the spinner before printing so the traceback isn't garbled.
    spin.stop
    puts error.inspect_with_backtrace.colorize(:red)
    exit 2
  end
end
74+
# Ground rules given to the LLM for command generation.
prompt = LLM::CLI::Prompt.new({
  "No user assistance, command ordering is important",
  "You are running on #{shell.operating_system}",
  "The current shell is: #{File.basename shell.selected}",
  "Wrap unknown command parameters in <brackets>",
  "you might need to change directory before executing subsequent commands",
  "a single command might solve multiple goals, be creative",
})
2783
# grab the users request: everything left in args after option parsing.
# `presence` maps an all-whitespace/empty request to nil.
request = args.join(" ").presence
unless request
  puts "No command description provided".colorize(:red)
  exit 1
end
prompt.add_goal request
3591
36- spin = Spin .new
# Render the full prompt once so the traced text matches what is sent.
prompt_message = prompt.generate
puts "> Requesting:\n#{prompt_message}\n".colorize(:red) if verbose
94+
3795begin
3896 spin.start
3997 # query the configured LLM
40- response = chat.send prompt.generate
98+ response = chat.send prompt_message
4199 spin.stop
42100
101+ puts " > Raw response:\n #{ response } \n " .colorize(:red ) if verbose
102+
43103 # process the response
44104 begin
45105 cmds = LLM ::CLI ::Prompt ::Response .from_json response
0 commit comments