Commit fdd07ad: quickstart hackathon (1 parent 8030a12)

File tree: 19 files changed (+379, -0 lines)
examples/llama_quickstart/.env.example (1 addition & 0 deletions)

```env
TOGETHER_API_KEY=<your-together-api-key>
```
examples/llama_quickstart/.gitignore (3 additions & 0 deletions)

```gitignore
.DS_Store
.env
poetry.lock
```
examples/llama_quickstart/README.md (79 additions & 0 deletions)

# Quickstart Llama Hackathon

Restack AI - Streamlit + FastAPI + Together AI with LlamaIndex Example

The AI workflow searches Hacker News for a query, crawls each matching project's website, and generates a summary of each one for the user.
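A rough sketch of that pipeline, for orientation only: the search endpoint (the public Hacker News Algolia API), the function names, and the truncation stand-in for summarization below are illustrative assumptions, not the repository's actual workflow code, which runs as a Restack workflow.

```python
import requests

def search_hn(query: str, count: int) -> list[dict]:
    # Query the public Hacker News search API (Algolia)
    resp = requests.get(
        "https://hn.algolia.com/api/v1/search",
        params={"query": query, "hitsPerPage": count},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()["hits"]

def crawl(url: str) -> str:
    # Fetch the raw HTML of a project's website
    return requests.get(url, timeout=10).text

def summarize(text: str) -> str:
    # Stand-in: the real example asks a Together AI model,
    # via LlamaIndex, to summarize the crawled page
    return text[:200]

def run_pipeline(query: str, count: int) -> list[str]:
    # search -> crawl -> summarize, one summary per result that has a URL
    summaries = []
    for hit in search_hn(query, count):
        url = hit.get("url")
        if url:
            summaries.append(summarize(crawl(url)))
    return summaries
```
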
## Prerequisites

- Python 3.12 or higher (the `pyproject.toml` requires `^3.12`)
- Poetry (for dependency management)
- Docker (for running the Restack services)
- Active [Together AI](https://together.ai) account with API key

## Usage

1. Run the Restack local engine with Docker:

```bash
docker run -d --pull always --name studio -p 5233:5233 -p 6233:6233 -p 7233:7233 ghcr.io/restackio/engine:main
```

2. Open the Web UI to see the workflows:

```bash
http://localhost:5233
```

3. Clone this repository:

```bash
git clone https://github.com/restackio/examples-python
cd examples-python/examples/llama_quickstart
```

4. Install dependencies using Poetry:

```bash
poetry install
```
5. Set up your environment variables:

Copy `.env.example` to `.env` and add your Together AI API key:

```bash
cp .env.example .env
# Edit .env and add your TOGETHER_API_KEY
```
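The services read this key from the process environment; a minimal sketch of how that can look with python-dotenv (already a dependency in `pyproject.toml`):

```python
import os
from dotenv import load_dotenv

# Load variables from .env into the process environment
load_dotenv()

together_api_key = os.environ["TOGETHER_API_KEY"]
```
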
6. Run the services:

```bash
poetry run services
```

This will start the Restack service with the defined workflows and functions.

7. In a new terminal, run the FastAPI app:

```bash
poetry run app
```

8. In a new terminal, run the Streamlit frontend:

```bash
poetry run streamlit run frontend.py
```

9. You can test the API endpoint without the Streamlit UI with:

```bash
curl -X POST \
  http://localhost:8000/api/schedule \
  -H "Content-Type: application/json" \
  -d '{"query": "AI", "count": 5}'
```

This will schedule the workflow and return the result.
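The JSON response mirrors the dict returned by the handler in `src/app.py`: a `result` field holding the workflow output, plus the `workflow_id` and `run_id` under which the run was scheduled.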
examples/llama_quickstart/frontend.py (52 additions & 0 deletions)

```python
import streamlit as st
import requests

# Set page title and header
st.title("Llama Hackathon Quickstart")
st.text("FastAPI, Restack, Together AI, LlamaIndex")

# Keep the user input in session state
if "user_input" not in st.session_state:
    st.session_state.user_input = ""

query = st.text_input("Query HN", key="query", value="ai")
count = st.number_input("Number of results", key="count", value=5)

# Initialize response history in session state
if "response_history" not in st.session_state:
    st.session_state.response_history = []

# Create button to send request
if st.button("Search HN"):
    if query:
        try:
            with st.spinner("Searching..."):
                # Make POST request to FastAPI backend
                response = requests.post(
                    "http://localhost:8000/api/schedule",
                    json={"query": query, "count": count},
                )

            if response.status_code == 200:
                st.success("Response received!")
                # Add the new response to history with the original query
                st.session_state.response_history.append({
                    "query": query,
                    "count": count,
                    "response": response.json()["result"],
                })
            else:
                st.error(f"Error: {response.status_code}")

        except requests.exceptions.ConnectionError:
            st.error("Failed to connect to the server. Make sure the FastAPI server is running.")
    else:
        st.warning("Please enter a prompt before submitting.")

# Display response history
if st.session_state.response_history:
    st.subheader("Response History")
    for i, item in enumerate(st.session_state.response_history, 1):
        st.markdown(f"**Query {i}:** {item['query']}")
        st.markdown(f"**Response {i}:** {item['response']}")
        st.markdown("---")
```
examples/llama_quickstart/pyproject.toml (29 additions & 0 deletions)

```toml
[tool.poetry]
name = "streamlit_fastapi_togetherai_llama"
version = "0.0.1"
description = "A simple example showcasing streamlit, fastapi, togetherai and llamaindex"
authors = [
  "Restack Team <[email protected]>",
]
readme = "README.md"
packages = [{include = "src"}]

[tool.poetry.dependencies]
python = "^3.12"
pydantic = "^2.9.2"
restack-ai = "^0.0.25"
fastapi = "^0.115.4"
llama-index = "^0.11.22"
llama-index-llms-together = "^0.2.0"
uvicorn = "^0.32.0"
python-dotenv = "0.19"
streamlit = "^1.40.0"
requests = "^2.32.3"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.poetry.scripts]
services = "src.services:run_services"
app = "src.app:run_app"
```

examples/llama_quickstart/src/__init__.py

Whitespace-only changes.
examples/llama_quickstart/src/app.py (59 additions & 0 deletions)

```python
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import time
from restack_ai import Restack
import uvicorn

# Define request model
class QueryRequest(BaseModel):
    query: str
    count: int

app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Adjust this in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/")
async def home():
    return "Welcome to the TogetherAI LlamaIndex FastAPI App!"

@app.post("/api/schedule")
async def schedule_workflow(request: QueryRequest):
    try:
        client = Restack()
        workflow_id = f"{int(time.time() * 1000)}-hn_workflow"

        run_id = await client.schedule_workflow(
            workflow_name="hn_workflow",
            workflow_id=workflow_id,
            input={"query": request.query, "count": request.count}
        )
        print("Scheduled workflow", run_id)

        result = await client.get_workflow_result(
            workflow_id=workflow_id,
            run_id=run_id
        )

        return {
            "result": result,
            "workflow_id": workflow_id,
            "run_id": run_id
        }
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))

# Entry point for `poetry run app`: serve the FastAPI app with uvicorn
def run_app():
    uvicorn.run("src.app:app", host="0.0.0.0", port=8000, reload=True)

if __name__ == "__main__":
    run_app()
```
examples/llama_quickstart/src/client.py (3 additions & 0 deletions)

```python
from restack_ai import Restack

# Shared Restack client instance
client = Restack()
```
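For reference, the same two client calls used in `src/app.py` can schedule a workflow and wait for its result from any script; a minimal sketch, assuming the services from step 6 are running:

```python
import asyncio
import time

from restack_ai import Restack

async def main():
    client = Restack()
    workflow_id = f"{int(time.time() * 1000)}-hn_workflow"

    # Schedule the workflow, then block until its result is available,
    # mirroring the /api/schedule handler in src/app.py
    run_id = await client.schedule_workflow(
        workflow_name="hn_workflow",
        workflow_id=workflow_id,
        input={"query": "AI", "count": 5},
    )
    result = await client.get_workflow_result(workflow_id=workflow_id, run_id=run_id)
    print(result)

if __name__ == "__main__":
    asyncio.run(main())
```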

examples/llama_quickstart/src/functions/__init__.py

Whitespace-only changes.

examples/llama_quickstart/src/functions/crawl/__init__.py

Whitespace-only changes.
