Mirror of https://github.com/nvbn/thefuck.git (synced 2025-01-18 12:06:04 +00:00)

commit 6251c9d9c8 (parent ceeaeab94b)

    Add ChatGPT as a rule, disabled by default.
@@ -378,6 +378,10 @@ default:
 * `git_push_force` – adds `--force-with-lease` to a `git push` (may conflict with `git_push_pull`);
 * `rm_root` – adds `--no-preserve-root` to `rm -rf /` command.

+The following rule uses OpenAI ChatGPT. To enable it, you need to set the environment variable `THEFUCK_OPENAI_TOKEN=<OpenAI Token>` and pass in `--chatgpt [No. SUGGESTIONS >= 1]`:
+
+- `chatgpt` – queries ChatGPT for suggestions. Arguments: `--chatgpt [No. SUGGESTIONS, default=0] --chatgpt-token [default=100] --chatgpt-model [default="gpt-3.5-turbo"]`.
+
 ##### [Back to Contents](#contents)

 ## Creating your own rules
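The enablement note added above boils down to a simple gate: the rule only activates when an OpenAI token is present and the requested suggestion count is positive. A minimal sketch of that gate (the helper name here is illustrative, not part of the commit; the real check is `_check_chatgpt` in the new rule file further down):

```python
import os

# Illustrative helper (not part of the commit): the chatgpt rule stays disabled
# unless THEFUCK_OPENAI_TOKEN is set and --chatgpt is given a value >= 1.
def chatgpt_enabled(suggestion_count):
    return suggestion_count > 0 and bool(os.getenv("THEFUCK_OPENAI_TOKEN"))
```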
@@ -9,3 +9,4 @@ pypandoc
 pytest-benchmark
 pytest-docker-pexpect
 twine
+openai
@@ -8,7 +8,9 @@ def _args(**override):
             'help': False, 'version': False, 'debug': False,
             'force_command': None, 'repeat': False,
             'enable_experimental_instant_mode': False,
-            'shell_logger': None}
+            'shell_logger': None, 'chatgpt': 0,
+            'chatgpt_token': 100,
+            'chatgpt_model': 'gpt-3.5-turbo'}
     args.update(override)
     return args

@@ -75,7 +75,7 @@ class TestSettingsFromEnv(object):


 def test_settings_from_args(settings):
-    settings.init(Mock(yes=True, debug=True, repeat=True))
+    settings.init(Mock(yes=True, debug=True, repeat=True, chatgpt=0, chatgpt_token=100))
     assert not settings.require_confirmation
     assert settings.debug
     assert settings.repeat
@@ -37,6 +37,24 @@ class Parser(object):
             '-h', '--help',
             action='store_true',
             help='show this help message and exit')
+        self._parser.add_argument(
+            '-c', '--chatgpt',
+            type=int,
+            default=0,
+            help='number of ChatGPT suggestions. set to 0 to disable ChatGPT'
+        )
+        self._parser.add_argument(
+            '-t', '--chatgpt-token',
+            type=int,
+            default=100,
+            help='maximum ChatGPT tokens per query'
+        )
+        self._parser.add_argument(
+            '-m', '--chatgpt-model',
+            type=str,
+            default="gpt-3.5-turbo",
+            help='ChatGPT model'
+        )
         self._add_conflicting_arguments()
         self._parser.add_argument(
             '-d', '--debug',
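For readers unfamiliar with `argparse`, here is a standalone sketch of how the three new flags parse, using only the option names, types, and defaults visible in the hunk above (the `prog` name and the example invocation are illustrative):

```python
import argparse

# Standalone sketch of the three options added above (defaults from the diff).
parser = argparse.ArgumentParser(prog='thefuck')
parser.add_argument('-c', '--chatgpt', type=int, default=0,
                    help='number of ChatGPT suggestions; 0 disables the rule')
parser.add_argument('-t', '--chatgpt-token', type=int, default=100,
                    help='maximum ChatGPT tokens per query')
parser.add_argument('-m', '--chatgpt-model', type=str, default='gpt-3.5-turbo',
                    help='ChatGPT model')

args = parser.parse_args(['--chatgpt', '3'])
print(args.chatgpt, args.chatgpt_token, args.chatgpt_model)
# -> 3 100 gpt-3.5-turbo
```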
@@ -124,6 +124,10 @@ class Settings(dict):
             from_args['debug'] = args.debug
         if args.repeat:
             from_args['repeat'] = args.repeat
+
+        from_args['chatgpt'] = args.chatgpt if args.chatgpt >= 0 else 0
+        from_args['chatgpt_token'] = args.chatgpt_token if args.chatgpt_token >= 0 else 0
+        from_args['chatgpt_model'] = args.chatgpt_model
         return from_args


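The conditional expressions above clamp negative CLI values to zero before they reach the settings. A quick illustrative equivalent (not part of the commit):

```python
# Illustrative equivalent of the clamping used above.
def _non_negative(value):
    return max(value, 0)

assert _non_negative(3) == 3     # positive values pass through
assert _non_negative(-1) == 0    # negative values are clamped to 0
```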
thefuck/rules/chatgpt.py (new file, 77 lines)
@@ -0,0 +1,77 @@
+import platform
+import openai
+import re
+import os
+from thefuck import logs
+from thefuck.conf import settings
+
+
+def _check_chatgpt(api_key: str = None) -> bool:
+    openai.api_key = os.getenv("THEFUCK_OPENAI_TOKEN")
+    if settings["chatgpt"] > 0 and (api_key or openai.api_key):
+        return True
+    return False
+
+
+enabled_by_default = _check_chatgpt()
+logs.debug(f"ChatGPT enabled: {enabled_by_default}")
+
+MAX_NUMBER = settings["chatgpt"]
+MAX_TOKENS = settings["chatgpt_token"]
+MODEL = settings["chatgpt_model"]  # by default: "gpt-3.5-turbo"
+
+
+def match(command):
+    return _check_chatgpt()
+
+
+def get_new_command(command):
+    result = _query_chatgpt(
+        command=command.script,
+        error=command.output,
+        explanation=False,
+    )
+    logs.debug(f"chatgpt result: {result}")
+    return result
+
+
+def _query_chatgpt(
+    command: str,
+    error: str,
+    explanation: bool,  # can be used to include explanations but not used yet
+    number: int = MAX_NUMBER,
+    model: str = MODEL,
+    max_tokens: int = MAX_TOKENS,
+    api_key: str = None,
+):
+    if api_key:
+        openai.api_key = api_key
+    elif openai.api_key is None:
+        return []
+
+    os_env = f"{platform.platform()}"
+    prompt = f"""
+OS: `{os_env}`
+Command: `{command}`
+Error: `{error}`
+Suggest {"one command" if number == 1 else f"{number} commands"} {"with" if explanation else "without"} explanation.
+Commands:"""
+
+    logs.debug("chatgpt: " + prompt)
+
+    try:
+        response = openai.ChatCompletion.create(
+            model=model,
+            messages=[
+                {"role": "user", "content": prompt},
+            ],
+            max_tokens=max_tokens,
+        )
+        content = response["choices"][0]["message"]["content"]
+        contents = [item.strip() for item in content.split("\n") if item.strip() != ""]
+        pattern = re.compile(r"^\d+\.\ *")
+        cleaned_contents = [re.sub(pattern, "", item).strip('`') for item in contents]
+        return cleaned_contents
+    except Exception as e:
+        logs.debug(f"chatgpt error: {e}")
+        return []
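The tail of `_query_chatgpt` turns a numbered, possibly backtick-wrapped ChatGPT reply into a plain list of commands. A small sketch of that post-processing with a made-up reply (the sample text is invented; the regex and stripping come from the rule above):

```python
import re

# Hypothetical ChatGPT reply listing numbered, backtick-wrapped commands.
content = "1. `git push --force`\n2. `git pull --rebase`\n\n3. git push origin HEAD"

contents = [item.strip() for item in content.split("\n") if item.strip() != ""]
pattern = re.compile(r"^\d+\.\ *")  # strips leading "1. ", "2. ", ...
cleaned = [re.sub(pattern, "", item).strip('`') for item in contents]
print(cleaned)
# -> ['git push --force', 'git pull --rebase', 'git push origin HEAD']
```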