diff --git a/README.md b/README.md index 3ea406fc..a181d588 100644 --- a/README.md +++ b/README.md @@ -378,6 +378,10 @@ default: * `git_push_force` – adds `--force-with-lease` to a `git push` (may conflict with `git_push_pull`); * `rm_root` – adds `--no-preserve-root` to `rm -rf /` command. +The following rule uses OpenAI ChatGPT. To enable it, you need to set the environment variable `THEFUCK_OPENAI_TOKEN=` and pass in `--chatgpt [No. SUGGESTIONS >= 1]`: + +- `chatgpt` – queries ChatGPT for suggestions. Arguments: `--chatgpt [No. SUGGESTIONS, default=0] --chatgpt-token [default=100] --chatgpt-model [default="gpt-3.5-turbo"]`. + ##### [Back to Contents](#contents) ## Creating your own rules diff --git a/requirements.txt b/requirements.txt index 33ae7a9d..e85ac804 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,4 @@ pypandoc pytest-benchmark pytest-docker-pexpect twine +openai diff --git a/tests/test_argument_parser.py b/tests/test_argument_parser.py index e5f1f44b..19fff106 100644 --- a/tests/test_argument_parser.py +++ b/tests/test_argument_parser.py @@ -8,7 +8,9 @@ def _args(**override): 'help': False, 'version': False, 'debug': False, 'force_command': None, 'repeat': False, 'enable_experimental_instant_mode': False, - 'shell_logger': None} + 'shell_logger': None, 'chatgpt': 0, + 'chatgpt_token': 100, + 'chatgpt_model': 'gpt-3.5-turbo'} args.update(override) return args diff --git a/tests/test_conf.py b/tests/test_conf.py index e03473ab..98509d60 100644 --- a/tests/test_conf.py +++ b/tests/test_conf.py @@ -75,7 +75,7 @@ class TestSettingsFromEnv(object): def test_settings_from_args(settings): - settings.init(Mock(yes=True, debug=True, repeat=True)) + settings.init(Mock(yes=True, debug=True, repeat=True, chatgpt=0, chatgpt_token=100)) assert not settings.require_confirmation assert settings.debug assert settings.repeat diff --git a/thefuck/argument_parser.py b/thefuck/argument_parser.py index 69c247f1..32cd411c 100644 --- 
# thefuck/rules/chatgpt.py
#
# ChatGPT-backed rule: asks an OpenAI chat model to suggest corrected
# commands for the one that just failed.
#
# Enabled only when the user opted in (`--chatgpt N` with N >= 1) and an
# API key is available via the THEFUCK_OPENAI_TOKEN environment variable
# (or passed explicitly to the query helper).
import os
import platform
import re
from typing import List, Optional

import openai

from thefuck import logs
from thefuck.conf import settings


def _check_chatgpt(api_key: Optional[str] = None) -> bool:
    """Return True when the ChatGPT rule can run.

    Side effect: (re)binds ``openai.api_key`` from the
    ``THEFUCK_OPENAI_TOKEN`` environment variable on every call.

    :param api_key: optional explicit API key overriding the env var.
    """
    openai.api_key = os.getenv("THEFUCK_OPENAI_TOKEN")
    # Robustness: ``settings.get`` instead of ``settings[...]`` so a missing
    # key quietly disables the rule rather than raising KeyError during
    # rule discovery.
    return settings.get("chatgpt", 0) > 0 and bool(api_key or openai.api_key)


# Evaluated at import time; rule modules are loaded after settings.init(),
# so CLI/env configuration is expected to be in place here — TODO confirm
# against the loader if that ordering ever changes.
enabled_by_default = _check_chatgpt()
logs.debug(f"ChatGPT enabled: {enabled_by_default}")

# Snapshot of the configuration at import time.  Kept for backward
# compatibility; _query_chatgpt now resolves its defaults lazily so that
# settings changed after import are still honoured.
MAX_NUMBER = settings.get("chatgpt", 0)
MAX_TOKENS = settings.get("chatgpt_token", 100)
MODEL = settings.get("chatgpt_model", "gpt-3.5-turbo")  # by default: "gpt-3.5-turbo"


def match(command):
    """Match every command while the rule is enabled and a key is set."""
    return _check_chatgpt()


def get_new_command(command):
    """Return a list of replacement commands suggested by ChatGPT."""
    result = _query_chatgpt(
        command=command.script,
        error=command.output,
        explanation=False,
    )
    logs.debug(f"chatgpt result: {result}")
    return result


def _query_chatgpt(
    command: str,
    error: str,
    explanation: bool,  # reserved: ask for explanations too (not surfaced yet)
    number: Optional[int] = None,
    model: Optional[str] = None,
    max_tokens: Optional[int] = None,
    api_key: Optional[str] = None,
) -> List[str]:
    """Query the chat model and return cleaned-up command suggestions.

    ``None`` for *number*, *model* or *max_tokens* resolves against the live
    ``settings`` at call time (falling back to the import-time snapshot), so
    runtime reconfiguration is honoured — previously these defaults were
    frozen at import.

    Returns ``[]`` on any failure (missing key, API error): the rule is
    strictly best-effort and must never crash the main flow.
    """
    if number is None:
        number = settings.get("chatgpt", MAX_NUMBER)
    if model is None:
        model = settings.get("chatgpt_model", MODEL)
    if max_tokens is None:
        max_tokens = settings.get("chatgpt_token", MAX_TOKENS)

    if api_key:
        openai.api_key = api_key
    elif openai.api_key is None:
        # No key from the caller, the env var, or a previous call: bail out.
        return []

    os_env = f"{platform.platform()}"
    prompt = f"""
OS: `{os_env}`
Command: `{command}`
Error: `{error}`
Suggest {"one command" if number == 1 else f"{number} commands"} {"with" if explanation else "without"} explanation.
Commands:"""

    logs.debug("chatgpt: " + prompt)

    try:
        response = openai.ChatCompletion.create(
            model=model,
            messages=[
                {"role": "user", "content": prompt},
            ],
            max_tokens=max_tokens,
        )
        content = response["choices"][0]["message"]["content"]
        # Keep non-empty lines, strip numbered-list prefixes ("1. ") and
        # surrounding backticks the model tends to add.
        contents = [item.strip() for item in content.split("\n") if item.strip() != ""]
        pattern = re.compile(r"^\d+\.\ *")
        cleaned_contents = [re.sub(pattern, "", item).strip('`') for item in contents]
        return cleaned_contents
    except Exception as e:
        # Deliberate broad catch: any API/network error just disables the
        # suggestion list for this invocation.
        logs.debug(f"chatgpt error: {e}")
        return []