2 changes: 1 addition & 1 deletion nldb/__init__.py
@@ -1,3 +1,3 @@
 """talk to your database"""

-__version__ = "0.3.8"
+__version__ = "0.4.0"
6 changes: 3 additions & 3 deletions nldb/api.py
@@ -23,8 +23,8 @@ def load_prompt_template():
 async def ask(q: Union[str, None] = None):
     """Process a text query and return the SQL statement, results, and explanation."""
     nldb = NLDB(prompt_template)
-    sql_statement = nldb.text_to_sql(q)
-    results, plain_text_results, answer = nldb.sql_to_answer(sql_statement)
+    sql_statement = await nldb.text_to_sql(q)
+    results, plain_text_results, answer = await nldb.sql_to_answer(sql_statement)
     return {
         "response": {
             "sql": sql_statement,
@@ -68,4 +68,4 @@ async def serve_index():


 def serve():
-    uvicorn.run("nldb.api:app", host=UVICORN_HOST, port=UVICORN_PORT, workers=2)
+    uvicorn.run("nldb.api:app", host=UVICORN_HOST, port=UVICORN_PORT, workers=1)
24 changes: 10 additions & 14 deletions nldb/core.py
@@ -3,7 +3,6 @@
 import os
 import re
 import sqlite3
-from functools import lru_cache
 from inspect import cleandoc
 from timeit import default_timer as timer
 from typing import Tuple
@@ -12,7 +11,7 @@
 import openai
 from tabulate import tabulate

-from nldb.config import DATABASE
+from nldb.config import DATABASE, OPENAI_API_KEY


 class ttimer:
@@ -53,7 +52,7 @@ def markdown_to_python(markdown_str):
     return None


-def cache_chat_completion(prompt_messages):
+async def cache_chat_completion(prompt_messages):
     """File-based cache for GPT-3.5 chat completions"""
     # hash the prompt messages
     m = hashlib.sha256()
@@ -65,10 +64,11 @@ def cache_chat_completion(prompt_messages):
     if os.path.exists(cache_file):
         return json.loads(open(cache_file).read())
     # otherwise, run the completion
-    resp = openai.ChatCompletion.create(
+    resp = await openai.ChatCompletion.acreate(
         model="gpt-3.5-turbo",
         messages=prompt_messages,
         temperature=0,
+        api_key=OPENAI_API_KEY,
     )
     # save the response to the cache
     if not os.path.exists(cache_dir):
@@ -92,17 +92,15 @@ def get_prompt_template(self) -> str:
         uncommented_lines = [line for line in lines if not line.strip().startswith("#")]
         return "\n".join(uncommented_lines).strip()

-    @lru_cache
-    def text_to_sql(self, question: str) -> str:
+    async def text_to_sql(self, question: str) -> str:
         # uses GPT-3.5 to convert a question into a SQL statement
         self.question = question
         prompt_messages = [
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": self.prompt_template % question},
         ]
         with ttimer() as gpt_timer:
-            resp = cache_chat_completion(prompt_messages)
-            print
+            resp = await cache_chat_completion(prompt_messages)
         self.timings.append(gpt_timer.time)
         self.tokens += resp["usage"]["total_tokens"]

@@ -123,8 +121,7 @@ def execute_sql(self, query: str) -> Tuple[list, list]:
         connection.close()
         return (columns, result)

-    @lru_cache
-    def sql_to_answer(self, sql: str) -> Tuple[str, str]:
+    async def sql_to_answer(self, sql: str) -> Tuple[str, str]:
         # executes the SQL statement and asks GPT to explain them
         prompt_template = self.prompt_template
         question = self.question
@@ -159,15 +156,14 @@ def sql_to_answer(self, sql: str) -> Tuple[str, str]:
         ]

         with ttimer() as gpt_timer:
-            resp = cache_chat_completion(prompt_messages)
+            resp = await cache_chat_completion(prompt_messages)
         self.timings.append(gpt_timer.time)

         answer = resp["choices"][0]["message"]["content"].strip()
         self.tokens += resp["usage"]["total_tokens"]
         return (html_results, plain_text_results, answer)

-    @lru_cache
-    def results_to_chart(self, question: str, results: str) -> Tuple[str, str]:
+    async def results_to_chart(self, question: str, results: str) -> Tuple[str, str]:
         # uses GPT-3.5 to convert a question and answer into a chart
         chart_prompt = f"""Given this question:

@@ -199,7 +195,7 @@ def results_to_chart(self, question: str, results: str) -> Tuple[str, str]:
             },
         ]
         with ttimer() as gpt_timer:
-            resp = cache_chat_completion(prompt_messages)
+            resp = await cache_chat_completion(prompt_messages)
         self.timings.append(gpt_timer.time)

         chart_code = resp["choices"][0]["message"]["content"].strip()
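A note on the dropped decorators: functools.lru_cache on an async def caches the coroutine object rather than its result, and a coroutine can only be awaited once, so any cache hit would raise "cannot reuse already awaited coroutine". The file-based cache keeps memoization by storing the parsed response instead. A condensed sketch of the pattern under assumptions: the cache directory name is invented (the diff elides it), api_key is a parameter here where the PR imports OPENAI_API_KEY from nldb.config, and acreate is the pre-1.0 openai-python API used in this PR:

    import hashlib
    import json
    import os

    import openai

    async def cache_chat_completion(prompt_messages, api_key):
        # Key the cache on a hash of the prompt messages, as in the diff.
        key = hashlib.sha256(json.dumps(prompt_messages).encode()).hexdigest()
        cache_dir = ".nldb_cache"  # assumed name; the diff only shows cache_dir
        cache_file = os.path.join(cache_dir, key + ".json")
        if os.path.exists(cache_file):
            # The cached value is plain response data, so it is safe to
            # return on every call -- unlike a cached coroutine object.
            return json.loads(open(cache_file).read())
        resp = await openai.ChatCompletion.acreate(
            model="gpt-3.5-turbo",
            messages=prompt_messages,
            temperature=0,
            api_key=api_key,
        )
        os.makedirs(cache_dir, exist_ok=True)
        with open(cache_file, "w") as f:
            json.dump(resp, f)  # OpenAIObject subclasses dict, so this serializes
        return resp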