diff --git a/dailalib/__init__.py b/dailalib/__init__.py
index ff8147f..dde8cee 100644
--- a/dailalib/__init__.py
+++ b/dailalib/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "3.15.6"
+__version__ = "3.16.0"
 
 import os
 # stop LiteLLM from querying at all to the remote server
@@ -11,13 +11,17 @@ def create_plugin(*args, **kwargs):
-    from libbs.api import DecompilerInterface
-
+    from libbs.api import DecompilerInterface
     #
     # LLM API (through LiteLLM api)
    #
 
     litellm_api = LiteLLMAIAPI(delay_init=True)
+
+    # load config before creating context menus; otherwise, the config would only be loaded after the
+    # "OK" button is clicked in ask_settings :X
+    litellm_api.load_or_create_config()
+
     # create context menus for prompts
     gui_ctx_menu_actions = {
         f"DAILA/LLM/{prompt_name}": (prompt.desc, getattr(litellm_api, prompt_name))
@@ -27,12 +31,7 @@ def create_plugin(*args, **kwargs):
     gui_ctx_menu_actions["DAILA/LLM/chat"] = ("Open LLM Chat...", get_llm_chat_creator(litellm_api))
 
     # create context menus for others
-    gui_ctx_menu_actions["DAILA/LLM/Settings/update_api_key"] = ("Update API key...", litellm_api.ask_api_key)
-    gui_ctx_menu_actions["DAILA/LLM/Settings/update_pmpt_style"] = ("Change prompt style...", litellm_api.ask_prompt_style)
-    gui_ctx_menu_actions["DAILA/LLM/Settings/update_model"] = ("Change model...", litellm_api.ask_model)
-    gui_ctx_menu_actions["DAILA/LLM/Settings/update_custom_url"] = ("Set Custom OpenAI Endpoint...", litellm_api.ask_custom_endpoint)
-    gui_ctx_menu_actions["DAILA/LLM/Settings/update_custom_model"] = ("Set Custom OpenAI Model...", litellm_api.ask_custom_model)
-
+    gui_ctx_menu_actions["DAILA/LLM/Settings"] = ("Settings...", litellm_api.ask_settings)
 
     #
     # VarModel API (local variable renaming)
     #
diff --git a/dailalib/api/litellm/__init__.py b/dailalib/api/litellm/__init__.py
index 90050c3..11f2a1e 100644
--- a/dailalib/api/litellm/__init__.py
+++ b/dailalib/api/litellm/__init__.py
@@ -1 +1,41 @@
+DEFAULT_MODEL = "gpt-4o"
+OPENAI_MODELS = {"gpt-4", "gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo", "o1-mini", "o1-preview"}
+# TODO: figure out how to derive MODEL_TO_TOKENS in the future without hardcoding it in `configuration`
+MODEL_TO_TOKENS = {
+    # TODO: update the token values for o1
+    "o1-mini": 8_000,
+    "o1-preview": 8_000,
+    "gpt-4o": 8_000,
+    "gpt-4o-mini": 16_000,
+    "gpt-4-turbo": 128_000,
+    "claude-3-5-sonnet-20240620": 200_000,
+    "gemini/gemini-pro": 12_288,
+    "vertex_ai_beta/gemini-pro": 12_288,
+    # perplex is on legacy mode :(
+    "perplexity/llama-3.1-sonar-small-128k-online": 127_072,
+    "perplexity/llama-3.1-sonar-medium-128k-online": 127_072,
+    "perplexity/llama-3.1-sonar-large-128k-online": 127_072,
+    "sonar-pro": 127_072,
+    "sonar": 127_072,
+}
+
+LLM_COST = {
+    "gpt-4o": {"prompt_price": 2.5, "completion_price": 10},
+    "gpt-4o-mini": {"prompt_price": 0.150, "completion_price": 0.600},
+    "gpt-4-turbo": {"prompt_price": 10, "completion_price": 30},
+    "claude-3.5-sonnet-20240620": {"prompt_price": 3, "completion_price": 15},
+    "gemini/gemini-pro": {"prompt_price": 0.150, "completion_price": 0.600},
+    "vertex_ai_beta/gemini-pro": {"prompt_price": 0.150, "completion_price": 0.600},
+    # perplex is on legacy mode, not available from 02/22/25 :(
+    "perplexity/llama-3.1-sonar-small-128k-online": {"prompt_price": 0.150, "completion_price": 0.600},
+    "perplexity/llama-3.1-sonar-large-128k-online": {"prompt_price": 0.150, "completion_price": 0.600},
+    "perplexity/llama-3.1-sonar-huge-128k-online": {"prompt_price": 0.150, "completion_price": 0.600},
+    # introduced the new sonar-pro/sonar
{"prompt_price": 0.150, "completion_price": 0.600}, + "sonar-pro": {"prompt_price": 0.150, "completion_price": 0.600}, +} + +# delay import for const creation from .litellm_api import LiteLLMAIAPI +from .prompt_type import PromptType, ALL_STYLES, DEFAULT_STYLE + diff --git a/dailalib/api/litellm/config_dialog.py b/dailalib/api/litellm/config_dialog.py new file mode 100644 index 0000000..19a2830 --- /dev/null +++ b/dailalib/api/litellm/config_dialog.py @@ -0,0 +1,160 @@ +import logging +from typing import Optional + +from dailalib.configuration import DAILAConfig +from .prompt_type import ALL_STYLES +from . import MODEL_TO_TOKENS + +from libbs.ui.qt_objects import ( + QDialog, + QGridLayout, + QHBoxLayout, + QLabel, + QLineEdit, + QPushButton, + QVBoxLayout, + QComboBox, +) + +_l = logging.getLogger(__name__) +AVAILABLE_MODELS = MODEL_TO_TOKENS.keys() + + +class DAILAConfigDialog(QDialog): + TITLE = "DAILA Configuration" + + def __init__(self, config: DAILAConfig, parent=None): + """ + Constructor for the DAILA configuration dialog. + params: + + config: config object, passed from litellm_api when calling this dialog + """ + + super().__init__(parent) + self.configured = False + self.DAILAConfig = config + + self.setWindowTitle(self.TITLE) + self._main_layout = QVBoxLayout() + self._grid_layout = QGridLayout() + self.row = 0 + + self._init_middle_widgets() + self._main_layout.addLayout(self._grid_layout) + + self._init_close_btn_widgets() + + self.setLayout(self._main_layout) + + def _init_middle_widgets(self): + """ + """ + + # LLM Model + llm_model = self.DAILAConfig.model + llm_model_label = QLabel("LLM Model:") + llm_model_label.setToolTip("The model to use for LiteLLM.") + + # using dropdown for LLM model + self._llm_model_edit = QComboBox(self) + self._llm_model_edit.addItems(AVAILABLE_MODELS) + self._llm_model_edit.setCurrentText(llm_model) + self._grid_layout.addWidget(llm_model_label, self.row, 0) + self._grid_layout.addWidget(self._llm_model_edit, self.row, 1) + self.row += 1 + + # API Key + + api_key = self.DAILAConfig.api_key + api_key_label = QLabel("API Key:") + api_key_label.setToolTip("The API key to use for LiteLLM, for the selected model.") + self._api_key_edit = QLineEdit(self) + self._api_key_edit.setText(api_key) + self._grid_layout.addWidget(api_key_label, self.row, 0) + self._grid_layout.addWidget(self._api_key_edit, self.row, 1) + self.row += 1 + + # Prompt Style + + prompt_style = self.DAILAConfig.prompt_style + prompt_style_label = QLabel("Prompt Style:") + prompt_style_label.setToolTip("The prompt style for DAILA to use, refer to dailalib/litellm/prompts for details.") + + # using dropdown for prompt style + self._prompt_style_edit = QComboBox(self) + self._prompt_style_edit.addItems(ALL_STYLES) + self._prompt_style_edit.setCurrentText(prompt_style) + self._grid_layout.addWidget(prompt_style_label, self.row, 0) + self._grid_layout.addWidget(self._prompt_style_edit, self.row, 1) + self.row += 1 + + # Custom OpenAI Endpoint + + custom_endpoint = self.DAILAConfig.custom_endpoint + custom_endpoint_label = QLabel("Custom OpenAI Endpoint:") + custom_endpoint_label.setToolTip("The custom OpenAI endpoint to use for LiteLLM.") + self._custom_endpoint_edit = QLineEdit(self) + self._custom_endpoint_edit.setText(custom_endpoint) + self._grid_layout.addWidget(custom_endpoint_label, self.row, 0) + self._grid_layout.addWidget(self._custom_endpoint_edit, self.row, 1) + self.row += 1 + + # Custom OpenAI Model + + custom_model = self.DAILAConfig.custom_model + custom_model_label = 
QLabel("Custom OpenAI Model:") + custom_model_label.setToolTip("The custom OpenAI model to use for LiteLLM.") + self._custom_model_edit = QLineEdit(self) + self._custom_model_edit.setText(custom_model) + self._grid_layout.addWidget(custom_model_label, self.row, 0) + self._grid_layout.addWidget(self._custom_model_edit, self.row, 1) + self.row += 1 + + def _init_close_btn_widgets(self): + # buttons + self._ok_button = QPushButton(self) + self._ok_button.setText("OK") + self._ok_button.setDefault(True) + self._ok_button.clicked.connect(self._on_ok_clicked) + + cancel_button = QPushButton(self) + cancel_button.setText("Cancel") + cancel_button.clicked.connect(self._on_cancel_clicked) + + buttons_layout = QHBoxLayout() + buttons_layout.addWidget(self._ok_button) + buttons_layout.addWidget(cancel_button) + + self._main_layout.addLayout(buttons_layout) + + def _on_cancel_clicked(self): + self.close() + + def parse_api_key(self, api_key_or_path: str) -> Optional[str]: + """ + Parse the API key from the input string. + """ + if "/" in api_key_or_path or "\\" in api_key_or_path: + # treat as path + with open(api_key_or_path, "r") as f: + api_key = f.read().strip() + else: + api_key = api_key_or_path + return api_key + + def _on_ok_clicked(self): + self.DAILAConfig.model = self._llm_model_edit.currentText() + self.DAILAConfig.api_key = self.parse_api_key(self._api_key_edit.text()) + self.DAILAConfig.prompt_style = self._prompt_style_edit.currentText() + self.DAILAConfig.custom_endpoint = self._custom_endpoint_edit.text() + self.DAILAConfig.custom_model = self._custom_model_edit.text() + self.configured = True + self.close() + + def config_dialog_exec(self): + self.exec() + if not self.configured: + _l.warning("DAILA Configuration dialog was closed without saving changes.") + else: + _l.info("DAILA Configuration dialog was closed and changes were saved.") + return self.DAILAConfig \ No newline at end of file diff --git a/dailalib/api/litellm/litellm_api.py b/dailalib/api/litellm/litellm_api.py index 3371933..7c0075d 100644 --- a/dailalib/api/litellm/litellm_api.py +++ b/dailalib/api/litellm/litellm_api.py @@ -1,32 +1,23 @@ +from pathlib import Path from typing import Optional import os +import logging import tiktoken +from . 
+from . import DEFAULT_MODEL, LLM_COST, OPENAI_MODELS
 from ..ai_api import AIAPI
+from dailalib.configuration import DAILAConfig
 
 active_model = None
 active_prompt_style = None
 
+_l = logging.getLogger(__name__)
+
 
 class LiteLLMAIAPI(AIAPI):
     prompts_by_name = []
-    DEFAULT_MODEL = "gpt-4o"
-    OPENAI_MODELS = {"gpt-4", "gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo", "o1-mini", "o1-preview"}
-    MODEL_TO_TOKENS = {
-        # TODO: update the token values for o1
-        "o1-mini": 8_000,
-        "o1-preview": 8_000,
-        "gpt-4o": 8_000,
-        "gpt-4o-mini": 16_000,
-        "gpt-4-turbo": 128_000,
-        "claude-3-5-sonnet-20240620": 200_000,
-        "gemini/gemini-pro": 12_288,
-        "vertex_ai_beta/gemini-pro": 12_288,
-        "perplexity/llama-3.1-sonar-large-128k-online": 127_072
-    }
-
-    # replacement strings for API calls
+
     def __init__(
         self,
         api_key: Optional[str] = None,
@@ -38,9 +29,12 @@ def __init__(
         chat_event_callbacks: Optional[dict] = None,
         custom_endpoint: Optional[str] = None,
         custom_model: Optional[str] = None,
+        use_config: bool = True,
         **kwargs
     ):
         super().__init__(**kwargs)
+
+        # default values
         self._api_key = None
         # default to openai api key if not provided
         self.api_key = api_key or os.getenv("OPENAI_API_KEY")
@@ -51,6 +45,11 @@ def __init__(
         self.chat_event_callbacks = chat_event_callbacks or {"send": None, "receive": None}
         self.custom_endpoint = custom_endpoint
         self.custom_model = custom_model
+        self.config = DAILAConfig()
+        if use_config:
+            loaded = self.load_or_create_config()
+            if loaded:
+                _l.info("Loaded config file from %s", self.config.save_location)
 
         # delay prompt import
         from .prompts import PROMPTS
@@ -62,6 +61,28 @@ def __init__(
         active_model = self.model
         active_prompt_style = self.prompt_style
 
+    def load_or_create_config(self, new_config=None) -> bool:
+        if new_config:
+            self.config = new_config
+            self.config.save()
+
+        if self.config.save_location and not Path(self.config.save_location).exists():
+            return False
+
+        # load the config
+        self.config.load()
+        self.model = self.config.model
+        self.api_key = self.config.api_key
+        self.prompt_style = self.config.prompt_style
+        if self.config.custom_endpoint:
+            self.custom_endpoint = self.config.custom_endpoint
+        if self.config.custom_model:
+            self.custom_model = self.config.custom_model
+        # update the globals (for threading hacks)
+        self._set_model(self.model)
+        self._set_prompt_style(self.prompt_style)
+        return True
+
     def __dir__(self):
         return list(super().__dir__()) + list(self.prompts_by_name.keys())
 
@@ -74,6 +95,73 @@ def __getattribute__(self, item):
         else:
             return object.__getattribute__(self, item)
 
+    @property
+    def api_key(self):
+        if not self._api_key or self.model is None:
+            return None
+        elif self.model in OPENAI_MODELS:
+            return os.getenv("OPENAI_API_KEY", None)
+        elif "claude" in self.model:
+            return os.getenv("ANTHROPIC_API_KEY", None)
+        elif "gemini/gemini" in self.model:
+            return os.getenv("GEMINI_API_KEY", None)
+        elif "sonar" in self.model or "perplexity" in self.model:
+            return os.getenv("PERPLEXITY_API_KEY", None)
+        elif "vertex" in self.model:
+            return self._api_key
+        else:
+            return None
+
+    @api_key.setter
+    def api_key(self, value):
+        self._api_key = value
+        _l.info(f"API key set for model {self.model}")
+        if self._api_key and self.model is not None:
+            if self.model in OPENAI_MODELS:
+                os.environ["OPENAI_API_KEY"] = self._api_key
+            elif "claude" in self.model:
+                os.environ["ANTHROPIC_API_KEY"] = self._api_key
+            elif "gemini/gemini" in self.model:
+                os.environ["GEMINI_API_KEY"] = self._api_key
+            elif "sonar" in self.model or "perplexity" in self.model:
os.environ["PERPLEXITY_API_KEY"] = self._api_key + elif "vertex" in self.model: + os.environ["VERTEX_API_KEY"] = self._api_key + else: + _l.error(f"API key not set for model {self.model}") + + @property + def custom_model(self): + return self._custom_model + + @custom_model.setter + def custom_model(self, value): + custom_model = value.strip() if isinstance(value, str) else None + if not custom_model: + self._custom_model = None + _l.info(f"Custom model selection cleared, or not in use") + return + self._custom_model = "openai/" + custom_model.strip() + _l.info(f"Custom model set to {self._custom_model}") + + @property + def custom_endpoint(self): + return self._custom_endpoint + + @custom_endpoint.setter + def custom_endpoint(self, value): + custom_endpoint = value.strip() if isinstance(value, str) else None + if not custom_endpoint: + self._custom_endpoint = None + _l.info(f"Custom endpoint disabled, defaulting to online API") + return + if not (custom_endpoint.lower().startswith("http://") or custom_endpoint.lower().startswith("https://")): + self._custom_endpoint = None + _l.error("Invalid endpoint format") + return + self._custom_endpoint = custom_endpoint.strip() + _l.info(f"Custom endpoint set to {self._custom_endpoint}") + def query_model( self, prompt: str, @@ -147,93 +235,15 @@ def fit_decompilation_to_token_max(decompilation: str, delta_step=10, model=DEFA @staticmethod def llm_cost(model_name: str, prompt_tokens: int, completion_tokens: int) -> float | None: # these are the $ per million tokens - COST = { - "gpt-4o": {"prompt_price": 2.5, "completion_price": 10}, - "gpt-4o-mini": {"prompt_price": 0.150, "completion_price": 0.600}, - "gpt-4-turbo": {"prompt_price": 10, "completion_price": 30}, - "claude-3.5-sonnet-20240620": {"prompt_price": 3, "completion_price": 15}, - "gemini/gemini-pro": {"prompt_price": 0.150, "completion_price": 0.600}, - "vertex_ai_beta/gemini-pro": {"prompt_price": 0.150, "completion_price": 0.600}, - # welp perplex doesn't have a completion price for now in their API :X - "perplexity/llama-3.1-sonar-small-128k-online": {"prompt_price": 0.150, "completion_price": 0.600}, - "perplexity/llama-3.1-sonar-large-128k-online": {"prompt_price": 0.150, "completion_price": 0.600}, - "perplexity/llama-3.1-sonar-huge-128k-online": {"prompt_price": 0.150, "completion_price": 0.600}, - } - if model_name not in COST: + if model_name not in LLM_COST: return None - llm_price = COST[model_name] + llm_price = LLM_COST[model_name] prompt_price = (prompt_tokens / 1000000) * llm_price["prompt_price"] completion_price = (completion_tokens / 1000000) * llm_price["completion_price"] return round(prompt_price + completion_price, 5) - # - # LMM Settings - # - - @property - def api_key(self): - if not self._api_key or self.model is None: - return None - elif self.model in self.OPENAI_MODELS: - return os.getenv("OPENAI_API_KEY", None) - elif "claude" in self.model: - return os.getenv("ANTHROPIC_API_KEY", None) - elif "gemini/gemini" in self.model: - return os.getenv("GEMINI_API_KEY", None) - elif "perplexity" in self.model: - return os.getenv("PERPLEXITY_API_KEY", None) - elif "vertex" in self.model: - return self._api_key - else: - return None - - @api_key.setter - def api_key(self, value): - self._api_key = value - if self._api_key and self.model is not None: - if self.model in self.OPENAI_MODELS: - os.environ["OPENAI_API_KEY"] = self._api_key - elif "claude" in self.model: - os.environ["ANTHROPIC_API_KEY"] = self._api_key - elif "gemini/gemini" in self.model: - 
os.environ["GEMINI_API_KEY"] = self._api_key - elif "perplexity" in self.model: - os.environ["PERPLEXITY_API_KEY"] = self._api_key - - def ask_api_key(self, *args, **kwargs): - api_key_or_path = self._dec_interface.gui_ask_for_string("Enter you AI API Key or Creds Path:", title="DAILA") - if "/" in api_key_or_path or "\\" in api_key_or_path: - # treat as path - with open(api_key_or_path, "r") as f: - api_key = f.read().strip() - else: - api_key = api_key_or_path - self.api_key = api_key - - def ask_custom_endpoint(self, *args, **kwargs): - custom_endpoint = self._dec_interface.gui_ask_for_string("Enter your custom OpenAI endpoint:", title="DAILA") - if not custom_endpoint.strip(): - self.custom_endpoint = None - self._dec_interface.info(f"Custom endpoint disabled, defaulting to online API") - return - if not (custom_endpoint.lower().startswith("http://") or custom_endpoint.lower().startswith("https://")): - self.custom_endpoint = None - self._dec_interface.error("Invalid endpoint format") - return - self.custom_endpoint = custom_endpoint.strip() - self._dec_interface.info(f"Custom endpoint set to {self.custom_endpoint}") - - def ask_custom_model(self, *args, **kwargs): - custom_model = self._dec_interface.gui_ask_for_string("Enter your custom OpenAI model name:", title="DAILA") - if not custom_model.strip(): - self.custom_model = None - self._dec_interface.info(f"Custom model selection cleared") - return - self.custom_model = "openai/" + custom_model.strip() - self._dec_interface.info(f"Custom model set to {self.custom_model}") - def _set_prompt_style(self, prompt_style): self.prompt_style = prompt_style global active_prompt_style @@ -248,42 +258,20 @@ def get_model(self): # TODO: this hack needs to be refactored later global active_model return str(active_model) + + # + # LLM Settings + # - def ask_prompt_style(self, *args, **kwargs): - if self._dec_interface is not None: - from .prompts import ALL_STYLES - - prompt_style = self.prompt_style - style_choices = ALL_STYLES.copy() - if self.prompt_style: - style_choices.remove(self.prompt_style) - style_choices = [self.prompt_style] + style_choices - - p_style = self._dec_interface.gui_ask_for_choice( - "What prompting style would you like to use?", - style_choices, - title="DAILA" - ) - if p_style != prompt_style and p_style: - if p_style not in ALL_STYLES: - self._dec_interface.error(f"Prompt style {p_style} is not supported.") - return - - self._set_prompt_style(p_style) - self._dec_interface.info(f"Prompt style set to {p_style}") - - def ask_model(self, *args, **kwargs): - if self._dec_interface is not None: - model_choices = list(LiteLLMAIAPI.MODEL_TO_TOKENS.keys()) - if self.model: - model_choices.remove(self.model) - model_choices = [self.model] + model_choices - - model = self._dec_interface.gui_ask_for_choice( - "What LLM model would you like to use?", - model_choices, - title="DAILA" - ) - self._set_model(model) - self._dec_interface.info(f"Model set to {model}") - + # single function to ask for all the settings + def ask_settings(self, *args, **kwargs): + # delay import + from .config_dialog import DAILAConfigDialog + # attempts to ask for all the configurations by the user. 
+        dialog = DAILAConfigDialog(self.config)
+        new_config = dialog.config_dialog_exec()
+        if new_config:
+            self.load_or_create_config(new_config=new_config)
+            self._dec_interface.info("DAILA Settings applied.")
+        else:
+            self._dec_interface.error("DAILA Settings not applied.")
\ No newline at end of file
diff --git a/dailalib/api/litellm/prompts/prompt_type.py b/dailalib/api/litellm/prompt_type.py
similarity index 100%
rename from dailalib/api/litellm/prompts/prompt_type.py
rename to dailalib/api/litellm/prompt_type.py
diff --git a/dailalib/api/litellm/prompts/__init__.py b/dailalib/api/litellm/prompts/__init__.py
index fde109e..39f570d 100644
--- a/dailalib/api/litellm/prompts/__init__.py
+++ b/dailalib/api/litellm/prompts/__init__.py
@@ -1,4 +1,4 @@
-from .prompt_type import PromptType, DEFAULT_STYLE, ALL_STYLES
+from dailalib.api.litellm.prompt_type import PromptType
 
 from .prompt import Prompt
 
diff --git a/dailalib/api/litellm/prompts/prompt.py b/dailalib/api/litellm/prompts/prompt.py
index 73a70af..d77a546 100644
--- a/dailalib/api/litellm/prompts/prompt.py
+++ b/dailalib/api/litellm/prompts/prompt.py
@@ -6,7 +6,7 @@
 
 from ...ai_api import AIAPI
 from ..litellm_api import LiteLLMAIAPI
-from .prompt_type import PromptType
+from dailalib.api.litellm.prompt_type import PromptType
 from libbs.artifacts import Comment, Function, Context
 
 from jinja2 import Template, StrictUndefined
diff --git a/dailalib/configuration.py b/dailalib/configuration.py
new file mode 100644
index 0000000..228cc92
--- /dev/null
+++ b/dailalib/configuration.py
@@ -0,0 +1,31 @@
+from libbs.configuration import BSConfig
+from typing import Optional, Dict
+from platformdirs import user_config_dir
+import logging
+
+_l = logging.getLogger(__name__)
+
+
+class DAILAConfig(BSConfig):
+    """
+    Configuration class for the LLM API: model, API key, prompt style, and probably other things in the future.
+    """
+    __slots__ = (
+        "save_location",
+        "_config_lock",
+        "model",            # LLM model selected by the user
+        "api_key",          # API key for the selected model
+        "prompt_style",     # prompt style selected by the user
+        "custom_endpoint",  # custom OpenAI endpoint
+        "custom_model",     # custom OpenAI model
+    )
+
+    def __init__(self, save_location: Optional[str] = None):
+        save_location = save_location or user_config_dir("daila")
+        super().__init__(save_location)
+        self.save_location = self.save_location / f"{self.__class__.__name__}.toml"
+        self.model = "gpt-4o"
+        self.api_key = "THISISAFAKEAPIKEY"
+        self.prompt_style = "few-shot"
+        self.custom_endpoint = ""
+        self.custom_model = ""
diff --git a/setup.cfg b/setup.cfg
index cfd89e9..5a11ca5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -30,3 +30,5 @@ console_scripts =
 [options.extras_require]
 full =
     varbert>=2.3.0
+    PySide6-Essentials>=6.4.2
+
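A minimal sketch (not part of the patch) of driving the new config flow headlessly, assuming libbs and platformdirs are installed; the model and key values below are placeholders, and the Qt dialog itself is skipped since it needs an event loop:

    # Exercise the new DAILAConfig / load_or_create_config path without the GUI.
    from dailalib.api.litellm import LiteLLMAIAPI
    from dailalib.configuration import DAILAConfig

    api = LiteLLMAIAPI(delay_init=True, use_config=False)  # skip loading any existing on-disk config

    cfg = DAILAConfig()            # defaults: gpt-4o, few-shot, placeholder API key
    cfg.model = "gpt-4o"           # placeholder choices
    cfg.api_key = "sk-placeholder"
    cfg.prompt_style = "few-shot"

    # Same call the dialog's "OK" handler triggers: saves the TOML under user_config_dir("daila"),
    # reloads it, and applies model/key/style to the API object.
    api.load_or_create_config(new_config=cfg)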