1
1
from aicodebot import version as aicodebot_version
2
2
from aicodebot .coder import CREATIVE_TEMPERATURE , DEFAULT_MAX_TOKENS , Coder
3
3
from aicodebot .config import get_config_file , get_local_data_dir , read_config
4
- from aicodebot .helpers import RichLiveCallbackHandler , create_and_write_file , exec_and_get_output , logger
4
+ from aicodebot .helpers import (
5
+ RichLiveCallbackHandler ,
6
+ SidekickCompleter ,
7
+ create_and_write_file ,
8
+ exec_and_get_output ,
9
+ logger ,
10
+ )
5
11
from aicodebot .learn import load_documents_from_repo , store_documents
6
12
from aicodebot .prompts import DEFAULT_PERSONALITY , PERSONALITIES , generate_files_context , get_prompt
13
+ from datetime import datetime
7
14
from langchain .chains import LLMChain
8
15
from langchain .memory import ConversationTokenBufferMemory
9
16
from openai .api_resources import engine
14
21
from rich .live import Live
15
22
from rich .markdown import Markdown
16
23
from rich .style import Style
17
- import click , datetime , json , langchain , openai , os , random , shutil , subprocess , sys , tempfile , webbrowser , yaml
24
+ import click , humanize , json , langchain , openai , os , random , shutil , subprocess , sys , tempfile , webbrowser , yaml
18
25
19
26
# ----------------------------- Default settings ----------------------------- #
20
27
@@ -159,7 +166,9 @@ def commit(verbose, response_token_size, yes, skip_pre_commit, files): # noqa:
159
166
chain = LLMChain (llm = llm , prompt = prompt , verbose = verbose )
160
167
response = chain .run (diff_context )
161
168
162
- commit_message_approved = click .confirm ("Do you want to use this commit message (type n to edit)?" , default = True )
169
+ commit_message_approved = click .confirm (
170
+ "Do you want to use this commit message (type n to edit)?" , default = True
171
+ )
163
172
164
173
# Write the commit message to a temporary file
165
174
with tempfile .NamedTemporaryFile (mode = "w" , delete = False ) as temp :
@@ -349,7 +358,7 @@ def fun_fact(verbose, response_token_size):
349
358
# Set up the chain
350
359
chain = LLMChain (llm = llm , prompt = prompt , verbose = verbose )
351
360
352
- year = random .randint (1942 , datetime .datetime . utcnow ().year )
361
+ year = random .randint (1942 , datetime .utcnow ().year )
353
362
chain .run (f"programming and artificial intelligence in the year { year } " )
354
363
355
364
@@ -368,7 +377,7 @@ def learn(repo_url, verbose):
368
377
369
378
owner , repo_name = Coder .parse_github_url (repo_url )
370
379
371
- start_time = datetime .datetime . utcnow ()
380
+ start_time = datetime .utcnow ()
372
381
373
382
local_data_dir = get_local_data_dir ()
374
383
@@ -382,7 +391,7 @@ def learn(repo_url, verbose):
382
391
383
392
with console .status ("Storing the repo in the vector store" , spinner = DEFAULT_SPINNER ):
384
393
store_documents (documents , vector_store_dir )
385
- console .print (f"✅ Repo loaded and indexed in { datetime .datetime . utcnow () - start_time } seconds." )
394
+ console .print (f"✅ Repo loaded and indexed in { datetime .utcnow () - start_time } seconds." )
386
395
387
396
388
397
@cli .command
@@ -433,7 +442,9 @@ def review(commit, verbose, output_format, response_token_size, files):
433
442
434
443
else :
435
444
# Stream live
436
- console .print ("Examining the diff and generating the review for the following files:\n \t " + "\n \t " .join (files ))
445
+ console .print (
446
+ "Examining the diff and generating the review for the following files:\n \t " + "\n \t " .join (files )
447
+ )
437
448
with Live (Markdown ("" ), auto_refresh = True ) as live :
438
449
llm .streaming = True
439
450
llm .callbacks = [RichLiveCallbackHandler (live , bot_style )]
@@ -446,7 +457,7 @@ def review(commit, verbose, output_format, response_token_size, files):
446
457
@click .option ("-v" , "--verbose" , count = True )
447
458
@click .option ("-t" , "--response-token-size" , type = int , default = DEFAULT_MAX_TOKENS * 3 )
448
459
@click .argument ("files" , nargs = - 1 )
449
- def sidekick (request , verbose , response_token_size , files ):
460
+ def sidekick (request , verbose , response_token_size , files ): # noqa: PLR0915
450
461
"""
451
462
EXPERIMENTAL: Coding help from your AI sidekick\n
452
463
FILES: List of files to be used as context for the session
@@ -462,6 +473,16 @@ def sidekick(request, verbose, response_token_size, files):
462
473
# git history
463
474
context = generate_files_context (files )
464
475
476
+ def show_file_context (files ):
477
+ console .print ("Files loaded in this session:" )
478
+ for file in files :
479
+ token_length = Coder .get_token_length (Path (file ).read_text ())
480
+ console .print (f"\t { file } ({ humanize .intcomma (token_length )} tokens)" )
481
+
482
+ if files :
483
+ files = set (files ) # Dedupe
484
+ show_file_context (files )
485
+
465
486
# Generate the prompt and set up the model
466
487
prompt = get_prompt ("sidekick" )
467
488
memory_token_size = response_token_size * 2 # Allow decent history
@@ -474,33 +495,64 @@ def sidekick(request, verbose, response_token_size, files):
474
495
475
496
llm = Coder .get_llm (model_name , verbose , response_token_size , streaming = True )
476
497
477
- # Open the temporary file in the user's editor
478
- editor = Path (os .getenv ("EDITOR" , "/usr/bin/vim" )).name
479
-
480
498
# Set up the chain
481
499
memory = ConversationTokenBufferMemory (
482
500
memory_key = "chat_history" , input_key = "task" , llm = llm , max_token_limit = memory_token_size
483
501
)
484
502
chain = LLMChain (llm = llm , prompt = prompt , memory = memory , verbose = verbose )
485
503
history_file = Path .home () / ".aicodebot_request_history"
486
504
487
- console .print (f"Enter a request OR (q) quit, OR (e) to edit using { editor } " )
505
+ console .print (
506
+ "Enter a request for your AICodeBot sidekick. Type / to see available commands.\n " , style = bot_style
507
+ )
488
508
while True : # continuous loop for multiple questions
489
509
edited_input = None
490
510
if request :
491
511
human_input = request
492
512
else :
493
- human_input = input_prompt ("🤖 ➤ " , history = FileHistory (history_file )).strip ()
513
+ human_input = input_prompt ("🤖 ➤ " , history = FileHistory (history_file ), completer = SidekickCompleter ())
514
+ human_input = human_input .strip ()
494
515
if not human_input :
495
516
# Must have been spaces or blank line
496
517
continue
497
- elif len (human_input ) == 1 :
498
- if human_input .lower () == "q" :
499
- break
500
- elif human_input .lower () == "e" :
518
+
519
+ if human_input .startswith ("/" ):
520
+ cmd = human_input .lower ().split ()[0 ]
521
+ # Handle commands
522
+ if cmd in ["/add" , "/drop" ]:
523
+ # Get the filename
524
+ # If they didn't specify a file, then ignore
525
+ try :
526
+ filename = human_input .split ()[1 ]
527
+ except IndexError :
528
+ continue
529
+
530
+ # If the file doesn't exist, or we can't open it, let them know
531
+ if not Path (filename ).exists ():
532
+ console .print (f"File '{ filename } ' doesn't exist." , style = error_style )
533
+ continue
534
+
535
+ if cmd == "/add" :
536
+ files .add (filename )
537
+ console .print (f"✅ Added '{ filename } ' to the list of files." )
538
+ elif cmd == "/drop" :
539
+ # Drop the file from the list
540
+ files .discard (filename )
541
+ console .print (f"✅ Dropped '{ filename } ' from the list of files." )
542
+
543
+ context = generate_files_context (files )
544
+ show_file_context (files )
545
+ continue
546
+ elif cmd == "/edit" :
501
547
human_input = edited_input = click .edit ()
548
+ elif cmd == "/files" :
549
+ show_file_context (files )
550
+ continue
551
+ elif cmd == "/quit" :
552
+ break
553
+
502
554
elif human_input .lower ()[- 2 :] == r"\e" :
503
- # If the text ends with \e then we want to edit it
555
+ # If the text ends with \e then we want to edit it
504
556
human_input = edited_input = click .edit (human_input [:- 2 ])
505
557
506
558
if edited_input :
@@ -535,5 +587,5 @@ def setup_config():
535
587
return existing_config
536
588
537
589
538
- if __name__ == "__main__" :
590
+ if __name__ == "__main__" : # pragma: no cover
539
591
cli ()
0 commit comments