#!/usr/bin/env python3
# Standard library imports
import sys
import subprocess
import os
import ctypes
import json
import datetime
import base64
import time
# Determine python_exe (use python.exe on Windows for console output)
if os.name == 'nt':
python_exe = os.path.join(os.path.dirname(sys.executable), 'python.exe')
else:
python_exe = sys.executable
# Upgrade setuptools and wheel
subprocess.check_call([python_exe, "-m", "pip", "install", "--upgrade", "setuptools", "wheel"])
# Define module to package mapping
module_to_package = {
"gradio": "gradio",
"requests": "requests",
"yaml": "pyyaml"
}
# Define specific package versions
package_versions = {
"gradio": "5.27.0", # Compatible with Python 3.11
}
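# ensure_module() implements a small bootstrap pattern: try to import the module,
# pip-install the mapped package (pinned if a version is listed above) when the
# import fails, then import again to confirm the install actually worked.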
def ensure_module(module_name):
package_name = module_to_package.get(module_name, module_name)
version = package_versions.get(package_name, None)
if version:
package_name = f"{package_name}=={version}"
try:
__import__(module_name)
except ImportError:
print(f"🚀 Installing missing package: {package_name}")
        result = subprocess.run([python_exe, "-m", "pip", "install", "--no-cache-dir", package_name],
                                capture_output=True, text=True)
if result.returncode != 0:
print(f"❌ Failed to install {package_name}: {result.stderr}")
return False
print(f"✅ Installed {package_name}")
try:
__import__(module_name)
except ImportError:
print(f"❌ Still cannot import {module_name} after installation")
return False
return True
# List of required modules
required_modules = ["gradio", "requests", "yaml"]
# Ensure all required modules are available
for module in required_modules:
if not ensure_module(module):
print(f"Cannot continue without {module}")
sys.exit(1)
# Now import the modules
import gradio as gr
import requests
import yaml
# --- Permission checks ---
def is_admin():
"""Check for Windows admin rights."""
try:
return ctypes.windll.shell32.IsUserAnAdmin()
    except Exception:
return False
def can_write(path="."):
"""Test write access by creating and deleting a temp file."""
try:
test = os.path.join(path, ".perm_test")
with open(test, "w") as f:
f.write("test")
os.remove(test)
return True
except Exception:
return False
if os.name == "nt" and not is_admin():
sys.exit("❌ Please run this script as Administrator on Windows.")
if not can_write(os.getcwd()):
sys.exit("❌ No write permission in current directory.")
# --- Load or write config ---
CONFIG_FILE = "config.yaml"
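# On first run a default config.yaml is written with placeholder values; edit it
# with your own keys before using the app. Expected layout (values are examples):
#   openrouter_api_key: ...
#   huggingface_api_key: ...
#   stablediffusionapi_key: ...
#   meshy_api_key: ...
#   beatoven_api_key: ...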
if not os.path.exists(CONFIG_FILE):
    cfg = {
        # Placeholder values -- replace with your own API keys.
        "openrouter_api_key": "YOUR_OPENROUTER_API_KEY",
        "huggingface_api_key": "YOUR_HUGGINGFACE_API_KEY",
        "stablediffusionapi_key": "YOUR_STABLEDIFFUSIONAPI_KEY",
        "meshy_api_key": "YOUR_MESHY_API_KEY",
        "beatoven_api_key": "YOUR_BEATOVEN_API_KEY"
    }
with open(CONFIG_FILE, "w") as f:
yaml.safe_dump(cfg, f)
with open(CONFIG_FILE) as f:
    config = yaml.safe_load(f)
# --- History utilities ---
HISTORY_FILE = "history.json"
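# history.json holds a flat JSON list of entries; each entry is a dict with a
# "type" field ("chat", "image", "video", "music", "3d", "automation"), a
# "timestamp", and type-specific fields such as "prompt", "file", or "url".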
def save_history(entry):
if not os.path.exists(HISTORY_FILE):
with open(HISTORY_FILE, "w") as f:
json.dump([], f)
    try:
        with open(HISTORY_FILE) as f:
            hist = json.load(f)
    except json.JSONDecodeError:
        hist = []  # Reset to an empty list if the file is corrupted
hist.append(entry)
with open(HISTORY_FILE, "w") as f:
json.dump(hist, f, indent=4)
def load_history():
    if not os.path.exists(HISTORY_FILE):
        return []
    try:
        with open(HISTORY_FILE) as f:
            return json.load(f)
    except json.JSONDecodeError:
        return []  # Return an empty list if the file is corrupted
# --- Fetch free models dynamically ---
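# Note: the filter below treats a per-request price of "0" as "free". OpenRouter's
# pricing object also lists per-token prompt/completion prices, so models kept by
# this check may still bill per token; tighten the filter if that matters.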
def fetch_free_models():
url = "https://openrouter.ai/api/v1/models"
hdr = {"Authorization": f"Bearer {config['openrouter_api_key']}"}
try:
data = requests.get(url, headers=hdr, timeout=10).json().get("data", [])
free = [
m["id"] for m in data
if m.get("pricing", {}).get("request") == "0"
and "text" in m.get("architecture", {}).get("input_modalities", [])
]
return free
except requests.RequestException as e:
print(f"❌ Error fetching free models: {e}")
return []
# Fetch models at startup
all_models = fetch_free_models()
# --- Core functionalities ---
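# chat_with_model() tries each selected model in order and returns the first
# successful reply, logging the exchange to history; if every model fails it
# reports that instead.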
def chat_with_model(prompt, selected_models):
url = "https://openrouter.ai/api/v1/chat/completions"
hdr = {
"Authorization": f"Bearer {config['openrouter_api_key']}",
"Content-Type": "application/json"
}
for model in selected_models:
try:
resp = requests.post(
url,
headers=hdr,
                json={"model": model, "messages": [{"role": "user", "content": prompt}]},
timeout=30
)
if resp.status_code == 200:
txt = resp.json()["choices"][0]["message"]["content"]
save_history({
"type": "chat",
"model": model,
"prompt": prompt,
"response": txt,
"timestamp": datetime.datetime.now().isoformat()
})
return txt
except requests.RequestException as e:
print(f"❌ Error with model {model}: {e}")
continue
return "❌ All selected models failed."
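# generate_image() has two paths: with inpainting enabled it base64-encodes the
# uploaded init/mask images and calls the stablediffusionapi.com v3 inpaint
# endpoint; otherwise it sends the prompt to the Hugging Face Inference API
# (Stable Diffusion 2.1) and saves the returned bytes as a PNG.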
def generate_image(prompt, use_inpaint, init_img, mask_img):
if use_inpaint:
url = "https://stablediffusionapi.com/api/v3/inpaint"
if not init_img or not mask_img:
return "❌ Initial image and mask required for inpainting."
try:
with open(init_img.name, "rb") as f:
init_img_b64 = base64.b64encode(f.read()).decode("utf-8")
with open(mask_img.name, "rb") as f:
mask_img_b64 = base64.b64encode(f.read()).decode("utf-8")
payload = {
"key": config["stablediffusionapi_key"],
"prompt": prompt,
"init_image": init_img_b64,
"mask_image": mask_img_b64,
"width": "512",
"height": "512",
"samples": "1",
"num_inference_steps": "30",
"safety_checker": "no",
"enhance_prompt": "yes",
"guidance_scale": 7.5,
"strength": 0.7,
"base64": "yes"
}
resp = requests.post(url, json=payload, timeout=60)
if resp.status_code == 200 and resp.json().get("status") == "success":
fn = f"img_{datetime.datetime.now():%Y%m%d%H%M%S}.png"
img_data = base64.b64decode(resp.json()["output"][0])
with open(fn, "wb") as f:
f.write(img_data)
save_history({
"type": "image",
"prompt": prompt,
"file": fn,
"inpainting": True,
"timestamp": datetime.datetime.now().isoformat()
})
return fn
return f"❌ Inpainting failed: {resp.status_code} - {resp.text}"
except requests.RequestException as e:
return f"❌ Inpainting request failed: {e}"
else:
        url = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
hdr = {"Authorization": f"Bearer {config['huggingface_api_key']}"}
try:
            resp = requests.post(url, headers=hdr, json={"inputs": prompt}, timeout=60)
if resp.status_code == 200:
fn = f"img_{datetime.datetime.now():%Y%m%d%H%M%S}.png"
with open(fn, "wb") as f:
f.write(resp.content)
save_history({
"type": "image",
"prompt": prompt,
"file": fn,
"inpainting": False,
"timestamp": datetime.datetime.now().isoformat()
})
return fn
return f"❌ Image generation failed: {resp.status_code} - {resp.text}"
except requests.RequestException as e:
return f"❌ Image generation request failed: {e}"
def generate_video(prompt):
    # Note: this Hugging Face endpoint may not behave as expected for video;
    # consider replacing it with a dedicated video generation API.
    url = "https://api-inference.huggingface.co/models/damo-vilab/text-to-video-ms-1.7b"
hdr = {"Authorization": f"Bearer {config['huggingface_api_key']}"}
try:
resp = requests.post(url, headers=hdr, json={"inputs": prompt}, timeout=60)
if resp.status_code == 200:
fn = f"vid_{datetime.datetime.now():%Y%m%d%H%M%S}.mp4"
with open(fn, "wb") as f:
f.write(resp.content)
save_history({
"type": "video",
"prompt": prompt,
"file": fn,
"timestamp": datetime.datetime.now().isoformat()
})
return fn
return f"❌ Video generation failed: {resp.status_code} - {resp.text}"
except requests.RequestException as e:
return f"❌ Video generation request failed: {e}"
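# generate_music() submits the prompt to Beatoven's compose endpoint, then polls
# the returned task every 5 seconds until its status is "composed" (returning the
# track URL) or an error status is reported. Note there is no upper bound on how
# long it will poll.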
def generate_music(prompt):
beatoven_api_key = config["beatoven_api_key"]
url = "https://api.beatoven.ai/v1/compose"
headers = {
"Authorization": f"Bearer {beatoven_api_key}",
"content-type": "application/json"
}
data = {
"prompt": {"text": prompt},
"format": "mp3",
"looping": False
}
try:
response = requests.post(url, headers=headers, json=data, timeout=60)
if response.status_code == 200:
task_id = response.json()["task_id"]
while True:
status_url = f"https://api.beatoven.ai/v1/tasks/{task_id}"
                status_response = requests.get(status_url, headers=headers, timeout=10)
if status_response.status_code == 200:
status = status_response.json()["status"]
if status == "composed":
track_url = status_response.json()["metadata"]["track_url"]
save_history({
"type": "music",
"prompt": prompt,
"url": track_url,
"timestamp": datetime.datetime.now().isoformat()
})
return track_url
elif status in ["composing", "running"]:
time.sleep(5)
else:
return "❌ Error: Composition failed"
else:
                    return f"❌ Error: Failed to get task status - {status_response.status_code} - {status_response.text}"
        return f"❌ Error: Failed to compose track - {response.status_code} - {response.text}"
except requests.RequestException as e:
return f"❌ Music generation request failed: {e}"
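# generate_3d() calls Meshy's image-to-3d endpoint when an image is supplied
# (sending it base64-encoded) and text-to-3d otherwise, both at the "preview"
# stage. It writes the raw response body straight to a .glb file; be aware that,
# like the video and music services above, the API may instead return a JSON task
# object that needs polling, in which case this save step would need adjusting.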
def generate_3d(prompt, image=None):
headers = {
"Authorization": f"Bearer {config['meshy_api_key']}",
"Content-Type": "application/json"
}
if image:
url = "https://api.meshy.ai/v1/image-to-3d"
try:
with open(image.name, "rb") as f:
image_b64 = base64.b64encode(f.read()).decode("utf-8")
payload = {
"image": image_b64,
"stage": "preview"
}
except Exception as e:
return f"❌ Error encoding image: {e}"
else:
url = "https://api.meshy.ai/v1/text-to-3d"
payload = {
"prompt": prompt,
"stage": "preview"
}
try:
resp = requests.post(url, json=payload, headers=headers, timeout=60)
if resp.status_code == 200:
fn = f"3d_{datetime.datetime.now():%Y%m%d%H%M%S}.glb"
with open(fn, "wb") as f:
f.write(resp.content)
save_history({
"type": "3d",
"prompt": prompt if not image else "Image provided",
"file": fn,
"image_used": bool(image),
"timestamp": datetime.datetime.now().isoformat()
})
return fn
return f"❌ 3D generation failed: {resp.status_code} - {resp.text}"
except requests.RequestException as e:
return f"❌ 3D generation request failed: {e}"
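# trigger_n8n() parses the user-supplied JSON payload, POSTs it to the given
# webhook URL (e.g. an n8n workflow trigger), and records the HTTP status or the
# error in history.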
def trigger_n8n(webhook, payload):
try:
        st = requests.post(webhook, json=json.loads(payload), timeout=10).status_code
if st == 200:
save_history({
"type": "automation",
"webhook": webhook,
"payload": payload,
"status": st,
"timestamp": datetime.datetime.now().isoformat()
})
return "OK"
return f"Failed({st})"
except Exception as e:
save_history({
"type": "automation",
"webhook": webhook,
"payload": payload,
"status": f"Error: {e}",
"timestamp": datetime.datetime.now().isoformat()
})
return f"Failed(Error: {e})"
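# render_history() renders the saved history newest-first as Markdown, showing
# type-specific fields (model/prompt/response for chat, download links for files,
# the track URL for music, and the payload for automation runs).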
def render_history():
hist = load_history()[::-1]
md = ""
for i, e in enumerate(hist, 1):
md += f"**{i}. {e['type'].capitalize()} @ {e['timestamp']}** \n"
if e["type"] == "chat":
            md += f"- Model: {e['model']} \n- Prompt: {e['prompt']} \n- Response: {e['response']}\n\n"
elif e["type"] in ("image", "video", "3d"):
md += f"- Prompt: {e['prompt']} \n- [Download]({e['file']})\n"
if e["type"] == "image":
md += f"- Inpainting: {e['inpainting']}\n\n"
elif e["type"] == "3d":
md += f"- Image Used: {e['image_used']}\n\n"
else:
md += "\n"
else:
key = "url" if "url" in e else "payload"
md += f"- {key.capitalize()}: {e[key]}\n\n"
return md
# --- Gradio UI ---
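# The UI exposes one tab per capability: Chat, Image, Video, Music, 3D,
# Automation (n8n webhooks), and a History tab that re-renders history.json
# on demand.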
with gr.Blocks() as demo:
    gr.Markdown("## GenAI Launcher ⚡\n(Deps auto-installed • Admin & write-check • History • Download)")
with gr.Tab("Chat"):
model_selection = gr.CheckboxGroup(
choices=all_models,
label="Select Models",
value=[all_models[0]] if all_models else [],
interactive=True
)
inp = gr.Textbox(label="Prompt")
out = gr.Textbox(label="Reply")
        gr.Button("Send").click(chat_with_model, inputs=[inp, model_selection], outputs=out)
with gr.Tab("Image"):
inp = gr.Textbox(label="Prompt")
use_inpaint = gr.Checkbox(label="Use Inpainting", value=False)
init_img = gr.File(label="Initial Image", visible=False)
mask_img = gr.File(label="Mask Image", visible=False)
out = gr.File(label="Download Image")
use_inpaint.change(
lambda x: [gr.update(visible=x) for _ in range(2)],
use_inpaint,
[init_img, mask_img]
)
        gr.Button("Gen").click(generate_image, [inp, use_inpaint, init_img, mask_img], out)
with gr.Tab("Video"):
inp = gr.Textbox(label="Prompt")
out = gr.File(label="Download Video")
gr.Button("Gen").click(generate_video, inp, out)
with gr.Tab("Music"):
inp = gr.Textbox(label="Prompt")
out = gr.Textbox(label="Stream URL")
gr.Button("Gen").click(generate_music, inp, out)
with gr.Tab("3D"):
inp = gr.Textbox(label="Prompt")
img = gr.File(label="Upload Image (Optional)")
out = gr.File(label="Download 3D Model")
gr.Button("Gen").click(generate_3d, [inp, img], out)
with gr.Tab("Automation"):
u = gr.Textbox(label="Webhook URL")
p = gr.Textbox(label="JSON Payload")
r = gr.Textbox(label="Result")
gr.Button("Trigger").click(trigger_n8n, [u, p], r)
with gr.Tab("History"):
h = gr.Markdown()
gr.Button("Refresh").click(render_history, outputs=h)
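# Launch the app locally; Gradio serves at http://127.0.0.1:7860 by default
# (pass share=True to demo.launch() for a temporary public link).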
demo.launch()