This repository was archived by the owner on Jul 4, 2025. It is now read-only.

chore: sync dev to main #2028

Merged
merged 31 commits into main from dev
Feb 25, 2025
Changes from all commits
31 commits
6518c54
test: try 1
Feb 15, 2025
66a5cc7
test: try 2
Feb 15, 2025
24c3f2d
test: reorganize folder
Feb 20, 2025
08dd5db
test: revert wfl
Feb 20, 2025
2d294ac
Merge branch 'dev' into test/harry-reorganize-api-folder
Feb 20, 2025
f656c68
test: update log folder
Feb 20, 2025
6e58dec
test: move docker test to root
Feb 20, 2025
2ad71b8
Merge branch 'dev' into test/harry-reorganize-api-folder
LeVinhGithub Feb 20, 2025
4249234
test: revert change wfl
Feb 21, 2025
1b5d382
test: update path in wfl
Feb 21, 2025
4fe771a
test: update path, add-update utils
Feb 21, 2025
d3b05f6
test: add test
Feb 22, 2025
8a20ce7
test: update path
Feb 22, 2025
43a1b20
test: add tenacity to package
Feb 22, 2025
53e87d0
test: add more
Feb 23, 2025
835e68f
chore: consolidate subprocess utils (#2011)
gau-nernst Feb 24, 2025
650eb80
Merge branch 'dev' into test/harry-reorganize-api-folder
LeVinhGithub Feb 24, 2025
cb2feb9
Fix: Include algorithm header file to fix build on distro other than …
qnixsynapse Feb 24, 2025
b4164c6
fix: append stop words from request in case of using template rendere…
vansangpfiev Feb 24, 2025
fde47a9
Merge branch 'dev' into test/harry-reorganize-api-folder
LeVinhGithub Feb 24, 2025
ca68981
Format header includes
qnixsynapse Feb 24, 2025
a312585
Format includes on the rest of the files
qnixsynapse Feb 24, 2025
e29539b
Merge pull request #2022 from janhq/fix/include_algorithm
qnixsynapse Feb 24, 2025
2cf0650
Merge branch 'dev' into test/harry-reorganize-api-folder
LeVinhGithub Feb 24, 2025
845d3de
task: organize the API folder follow the swagger API #2006
LeVinhGithub Feb 24, 2025
752c2c5
fix: add filter by tag for repository list (#2021)
vansangpfiev Feb 25, 2025
54432ad
fix: block `command` field changes for python engine (#2007)
vansangpfiev Feb 25, 2025
a84b935
fix: filter out Intel GPUs (#2015)
vansangpfiev Feb 25, 2025
eddc1d5
chore: unit test (#2025)
vansangpfiev Feb 25, 2025
75ad69c
chore: update tests (#2027)
vansangpfiev Feb 25, 2025
cbca628
Merge branch 'dev' of https://github.com/janhq/cortex.cpp into s/chor…
sangjanai Feb 25, 2025
16 changes: 8 additions & 8 deletions .github/workflows/cortex-cpp-quality-gate.yml
@@ -168,7 +168,7 @@ jobs:
cp build/cortex build/cortex-beta
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/main.py
+python e2e-test/runner/main.py
rm build/cortex-nightly
rm build/cortex-beta
env:
@@ -182,7 +182,7 @@ jobs:
cp build/cortex.exe build/cortex-beta.exe
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/main.py
+python e2e-test/runner/main.py
rm build/cortex-nightly.exe
rm build/cortex-beta.exe
env:
@@ -196,7 +196,7 @@ jobs:
cp build/cortex build/cortex-beta
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/cortex-llamacpp-e2e-nightly.py
+python e2e-test/runner/cortex-llamacpp-e2e-nightly.py
rm build/cortex-nightly
rm build/cortex-beta
env:
@@ -210,7 +210,7 @@ jobs:
cp build/cortex.exe build/cortex-beta.exe
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/cortex-llamacpp-e2e-nightly.py
+python e2e-test/runner/cortex-llamacpp-e2e-nightly.py
rm build/cortex-nightly.exe
rm build/cortex-beta.exe
env:
@@ -443,7 +443,7 @@ jobs:
cp build/cortex build/cortex-beta
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/main.py
+python e2e-test/runner/main.py
rm build/cortex-nightly
rm build/cortex-beta
env:
@@ -457,7 +457,7 @@ jobs:
cp build/cortex.exe build/cortex-beta.exe
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/main.py
+python e2e-test/runner/main.py
rm build/cortex-nightly.exe
rm build/cortex-beta.exe
env:
@@ -471,7 +471,7 @@ jobs:
cp build/cortex build/cortex-beta
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/cortex-llamacpp-e2e-nightly.py
+python e2e-test/runner/cortex-llamacpp-e2e-nightly.py
rm build/cortex-nightly
rm build/cortex-beta
env:
@@ -485,7 +485,7 @@ jobs:
cp build/cortex.exe build/cortex-beta.exe
python -m pip install --upgrade pip
python -m pip install -r e2e-test/requirements.txt
-python e2e-test/cortex-llamacpp-e2e-nightly.py
+python e2e-test/runner/cortex-llamacpp-e2e-nightly.py
rm build/cortex-nightly.exe
rm build/cortex-beta.exe
env:
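For reference, the quality-gate steps above now point at the relocated e2e entry points under e2e-test/runner/. A minimal local sketch of the same invocation, assuming the repository root as the working directory and a binary already built under build/ as in the workflow:

import subprocess
import sys

# Mirror of the CI steps above: install the e2e requirements, then run the
# relocated runner entry point (formerly e2e-test/main.py).
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-r", "e2e-test/requirements.txt"],
    check=True,
)
subprocess.run([sys.executable, "e2e-test/runner/main.py"], check=True)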
2 changes: 1 addition & 1 deletion docs/static/openapi/cortex.json
@@ -5356,7 +5356,7 @@
"type": "string",
"description": "The identifier or URL of the model to use. It can be a model ID on Cortexso (https://huggingface.co/cortexso) or a HuggingFace URL pointing to the model file. For example: 'gpt2' or 'https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q2_K.gguf'",
"examples": [
"tinyllama:gguf",
"tinyllama:1b",
"https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/blob/main/mistral-7b-instruct-v0.1.Q2_K.gguf"
]
},
1 change: 1 addition & 0 deletions engine/common/download_task_queue.h
@@ -1,3 +1,4 @@
+#include <algorithm>
#include <condition_variable>
#include <deque>
#include <mutex>
9 changes: 7 additions & 2 deletions engine/controllers/models.cc
@@ -385,6 +385,10 @@ void Models::UpdateModel(const HttpRequestPtr& req,
message = "Successfully update model ID '" + model_id +
"': " + json_body.toStyledString();
} else if (model_config.engine == kPythonEngine) {
+// Block changes to `command`
+if (json_body.isMember("command")) {
+json_body.removeMember("command");
+}
config::PythonModelConfig python_model_config;
python_model_config.ReadFromYaml(yaml_fp.string());
python_model_config.FromJson(json_body);
@@ -859,10 +863,11 @@ void Models::GetModelSource(
void Models::GetRepositoryList(
const HttpRequestPtr& req,
std::function<void(const HttpResponsePtr&)>&& callback,
-std::optional<std::string> author) {
+std::optional<std::string> author, std::optional<std::string> tag) {
if (!author.has_value())
author = "cortexso";
-auto res = model_src_svc_->GetRepositoryList(author.value());
+auto res =
+model_src_svc_->GetRepositoryList(author.value(), tag.value_or(""));
if (res.has_error()) {
Json::Value ret;
ret["message"] = res.error();
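The first hunk above blocks changes to the `command` field when a python-engine model is updated. A hedged Python mirror of that server-side step, with illustrative field names only:

# Hypothetical mirror of the C++ change above: strip "command" from a
# python-engine model-update payload before the remaining fields are merged.
def sanitize_python_engine_update(json_body: dict) -> dict:
    body = dict(json_body)
    body.pop("command", None)  # equivalent of json_body.removeMember("command")
    return body

# Example: the "command" key is dropped; other (illustrative) fields pass through.
print(sanitize_python_engine_update({"command": ["python", "serve.py"], "log_path": "model.log"}))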
7 changes: 4 additions & 3 deletions engine/controllers/models.h
@@ -44,8 +44,8 @@ class Models : public drogon::HttpController<Models, false> {
ADD_METHOD_TO(Models::DeleteModelSource, "/v1/models/sources", Delete);
ADD_METHOD_TO(Models::GetModelSources, "/v1/models/sources", Get);
ADD_METHOD_TO(Models::GetModelSource, "/v1/models/sources/{src}", Get);
-ADD_METHOD_TO(Models::GetRepositoryList, "/v1/models/hub?author={author}",
-Get);
+ADD_METHOD_TO(Models::GetRepositoryList,
+"/v1/models/hub?author={author}&tag={tag}", Get);
METHOD_LIST_END

explicit Models(std::shared_ptr<DatabaseService> db_service,
@@ -115,7 +115,8 @@ class Models : public drogon::HttpController<Models, false> {

void GetRepositoryList(const HttpRequestPtr& req,
std::function<void(const HttpResponsePtr&)>&& callback,
-std::optional<std::string> author);
+std::optional<std::string> author,
+std::optional<std::string> tag);

private:
std::shared_ptr<DatabaseService> db_service_;
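A minimal client-side sketch of the updated repository-list route declared above. The server address matches the one used by the e2e tests below; the tag value is purely illustrative:

import requests

# Query the model hub listing with the new optional `tag` filter alongside `author`.
# `author` defaults to "cortexso" server-side when omitted.
params = {"author": "cortexso", "tag": "gguf"}  # illustrative values
response = requests.get("http://localhost:3928/v1/models/hub", params=params)
response.raise_for_status()
print(response.json())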
@@ -1,7 +1,7 @@
import pytest
import requests
import time
-from test_runner import (
+from utils.test_runner import (
start_server,
stop_server,
wait_for_websocket_download_success_event,
@@ -1,6 +1,6 @@
import pytest
import requests
-from test_runner import start_server, stop_server, get_latest_pre_release_tag
+from utils.test_runner import start_server, stop_server, get_latest_pre_release_tag

latest_pre_release_tag = get_latest_pre_release_tag("janhq", "cortex.llamacpp")

@@ -1,6 +1,6 @@
import pytest
import requests
-from test_runner import (
+from utils.test_runner import (
start_server,
stop_server,
wait_for_websocket_download_success_event,
82 changes: 82 additions & 0 deletions engine/e2e-test/api/engines/test_api_get_default_engine.py
@@ -0,0 +1,82 @@
import pytest
import requests
from utils.test_runner import start_server, stop_server
import jsonschema
from tenacity import retry, wait_exponential, stop_after_attempt
from utils.logger import log_response
from utils.assertion import assert_equal


class TestApiDefaultEngine:

@pytest.fixture(autouse=True)
def setup_and_teardown(self):
# Setup
success = start_server()
if not success:
raise Exception("Failed to start server")

yield

# Teardown
stop_server()

def test_api_get_default_engine_successfully(self):
# Data test
engine= "llama-cpp"
name= "linux-amd64-avx-cuda-11-7"
version= "v0.1.35-27.10.24"

data = {"version": version, "variant": name}
post_install_url = f"http://localhost:3928/v1/engines/{engine}/install"
response = requests.post(
post_install_url, json=data
)
assert_equal(response.status_code,200)
log_response(response.json(), "test_api_get_default_engine_successfully")

get_list_url = f"http://localhost:3928/v1/engines/{engine}"
get_default_url = f"http://localhost:3928/v1/engines/{engine}/default"

@retry(
wait=wait_exponential(multiplier=2, min=2, max=30),
stop=stop_after_attempt(5)
)
def get_request(url):
response = requests.get(url)
assert len(response.json()) > 0

get_request(get_list_url)

response_default_engine = requests.get(get_default_url)
json_data = response_default_engine.json()

log_response(json_data, "test_api_get_default_engine_successfully")
assert_equal(response_default_engine.status_code, 200)

schema = {
"type": "object",
"properties": {
"engine": {"type": "string"},
"variant": {"type": "string"},
"version": {"type": "string"}
},
"required": ["engine", "variant", "version"]
}

# Validate response schema
jsonschema.validate(instance=json_data, schema=schema)

def test_api_get_default_engine_failed_invalid_engine(self):
# Data test
engine= "invalid"

get_default_url = f"http://localhost:3928/v1/engines/{engine}/default"

response_default_engine = requests.get(get_default_url)
json_data_get_default = response_default_engine.json()

log_response(json_data_get_default, "test_api_get_default_engine_failed_invalid_engine")
assert_equal(response_default_engine.status_code, 400)

assert_equal(json_data_get_default["message"], f"Engine {engine} is not supported yet!")
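The new tests import log_response, assert_equal, and assert_contains from the reorganized utils package; those helpers are not part of this diff. A hypothetical minimal sketch, for orientation only (the real implementations in the repository may differ):

# Hypothetical sketch of engine/e2e-test/utils/assertion.py (path assumed).
def assert_equal(actual, expected):
    assert actual == expected, f"Expected {expected!r}, got {actual!r}"

def assert_contains(text, substring):
    assert substring in text, f"Expected {substring!r} to appear in {text!r}"

# Hypothetical sketch of engine/e2e-test/utils/logger.py (path assumed).
def log_response(data, test_name):
    print(f"[{test_name}] response: {data}")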
76 changes: 76 additions & 0 deletions engine/e2e-test/api/engines/test_api_get_engine_release.py
@@ -0,0 +1,76 @@
import pytest
import requests
from utils.test_runner import start_server, stop_server
import jsonschema
from tenacity import retry, wait_exponential, stop_after_attempt
from utils.logger import log_response
from utils.assertion import assert_equal, assert_contains


class TestApiEngineRelease:

@pytest.fixture(autouse=True)
def setup_and_teardown(self):
# Setup
success = start_server()
if not success:
raise Exception("Failed to start server")

yield

# Teardown
stop_server()

def test_api_get_engine_release_successfully(self):
# Data test
engine= "llama-cpp"
get_release_url = f"http://localhost:3928/v1/engines/{engine}/releases"

@retry(
wait=wait_exponential(multiplier=2, min=2, max=30),
stop=stop_after_attempt(5)
)
def get_request(url):
response = requests.get(url)
assert len(response.json()) > 0

get_request(get_release_url)

response_engine_release = requests.get(get_release_url)
json_data = response_engine_release.json()

log_response(json_data, "test_api_get_engine_release_successfully")
assert_equal(response_engine_release.status_code, 200)

schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "array",
"items": {
"type": "object",
"properties": {
"draft": { "type": "boolean" },
"name": { "type": "string" },
"prerelease": { "type": "boolean" },
"published_at": { "type": "string", "format": "date-time" },
"url": { "type": "string", "format": "uri" }
},
"required": ["draft", "name", "prerelease", "published_at", "url"]
}
}

# Validate response schema
jsonschema.validate(instance=json_data, schema=schema)

def test_api_ge_engine_release_failed_invalid_engine(self):
# Data test
engine= "invalid"

get_default_url = f"http://localhost:3928/v1/engines/{engine}/releases"

response_default_engine = requests.get(get_default_url)
json_data_get_default = response_default_engine.json()

log_response(json_data_get_default, "test_api_ge_engine_release_failed_invalid_engine")
assert_equal(response_default_engine.status_code, 400)

assert_contains(json_data_get_default["message"], "Not Found")
73 changes: 73 additions & 0 deletions engine/e2e-test/api/engines/test_api_get_engine_release_latest.py
@@ -0,0 +1,73 @@
import pytest
import requests
from utils.test_runner import start_server, stop_server
import jsonschema
from tenacity import retry, wait_exponential, stop_after_attempt
from utils.logger import log_response
from utils.assertion import assert_equal, assert_contains


class TestApiEngineReleaseLatest:

@pytest.fixture(autouse=True)
def setup_and_teardown(self):
# Setup
success = start_server()
if not success:
raise Exception("Failed to start server")

yield

# Teardown
stop_server()

def test_api_get_engine_release_latest_successfully(self):
# Data test
engine= "llama-cpp"
get_release_url = f"http://localhost:3928/v1/engines/{engine}/releases/latest"

@retry(
wait=wait_exponential(multiplier=2, min=2, max=30),
stop=stop_after_attempt(5)
)
def get_request(url):
response = requests.get(url)
assert len(response.json()) > 0

get_request(get_release_url)

response_engine_release = requests.get(get_release_url)
json_data = response_engine_release.json()

log_response(json_data, "test_api_get_engine_release_latest_successfully")
assert_equal(response_engine_release.status_code, 200)

schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "array",
"items": {
"type": "object",
"properties": {
"created_at": {
"type": "string",
"format": "date-time"
},
"download_count": {
"type": "integer",
"minimum": 0
},
"name": {
"type": "string"
},
"size": {
"type": "integer",
"minimum": 0
}
},
"required": ["created_at", "download_count", "name", "size"]
}
}


# Validate response schema
jsonschema.validate(instance=json_data, schema=schema)
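All three new engine tests define the same tenacity-wrapped polling helper inline. A shared variant they could import instead, sketched here as a hypothetical refactor rather than part of this PR:

import requests
from tenacity import retry, stop_after_attempt, wait_exponential

# Retry the GET with exponential backoff (2s growing to 30s, at most 5 attempts)
# until the endpoint returns a non-empty JSON payload, then hand the payload back.
@retry(
    wait=wait_exponential(multiplier=2, min=2, max=30),
    stop=stop_after_attempt(5),
)
def get_nonempty_json(url):
    response = requests.get(url)
    payload = response.json()
    assert len(payload) > 0
    return payload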