Commit ccab65b

cebtenzzre authored and phymbert committed
server tests : more pythonic process management; fix bare except: (ggml-org#6146)
* server tests : remove seemingly redundant newlines in print()
* server tests : use built-in subprocess features, not os.kill and psutil
* server tests : do not catch e.g. SystemExit; use print_exc
* server tests: handle TimeoutExpired exception
* server tests: fix connect on dual-stack systems
* server: tests: add new tokens regex on windows generated following new repeat penalties default changed in (ggml-org#6127)
* server: tests: remove the hack on windows since now we get the good socket family
* server: tests: add new tokens regex following new repeat penalties default changed in (ggml-org#6127)
* server: tests: add new tokens regex following new repeat penalties default changed in (ggml-org#6127)

---------

Co-authored-by: Pierrick HYMBERT <[email protected]>
1 parent 27160fc commit ccab65b
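The heart of the change is easier to see in isolation. Below is a minimal, self-contained sketch (not code from this commit) of the subprocess-based pattern the tests now follow: signal the child through its own Popen handle instead of os.kill()/psutil, wait briefly, then escalate. The sleeping Python child is a stand-in for the llama.cpp server binary.

import os
import signal
import subprocess
import sys
from subprocess import TimeoutExpired

# Sketch only: a sleeping child stands in for the llama.cpp server process.
proc = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(30)"])

interrupt = signal.CTRL_C_EVENT if os.name == 'nt' else signal.SIGINT
proc.send_signal(interrupt)   # ask for a graceful shutdown
try:
    proc.wait(0.5)            # give it 500 ms, as after_scenario() now does
except TimeoutExpired:
    proc.kill()               # escalate to SIGKILL
    proc.wait()               # reap the child so poll() reports an exit code

print("child exit code:", proc.poll())

Because the Popen object tracks its own child, poll() doubles as the liveness check that previously required a psutil-based pid_exists() helper.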

File tree

examples/server/tests/features/environment.py
examples/server/tests/features/server.feature
examples/server/tests/features/steps/steps.py
examples/server/tests/requirements.txt

4 files changed: +46 −76 lines changed

examples/server/tests/features/environment.py

+22 −52

@@ -5,15 +5,14 @@
 import time
 import traceback
 from contextlib import closing
-
-import psutil
+from subprocess import TimeoutExpired
 
 
 def before_scenario(context, scenario):
     context.debug = 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON'
     if context.debug:
-        print("DEBUG=ON\n")
-        print(f"\x1b[33;42mStarting new scenario: {scenario.name}!\x1b[0m\n")
+        print("DEBUG=ON")
+        print(f"\x1b[33;42mStarting new scenario: {scenario.name}!\x1b[0m")
     port = 8080
     if 'PORT' in os.environ:
         port = int(os.environ['PORT'])
@@ -27,75 +26,46 @@ def after_scenario(context, scenario):
             return
         if scenario.status == "failed":
             if 'GITHUB_ACTIONS' in os.environ:
-                print(f"\x1b[33;101mSCENARIO FAILED: {scenario.name} server logs:\x1b[0m\n\n")
+                print(f"\x1b[33;101mSCENARIO FAILED: {scenario.name} server logs:\x1b[0m\n")
                 if os.path.isfile('llama.log'):
                     with closing(open('llama.log', 'r')) as f:
                         for line in f:
                             print(line)
                 if not is_server_listening(context.server_fqdn, context.server_port):
-                    print("\x1b[33;101mERROR: Server stopped listening\x1b[0m\n")
+                    print("\x1b[33;101mERROR: Server stopped listening\x1b[0m")
 
-        if not pid_exists(context.server_process.pid):
+        if context.server_process.poll() is not None:
             assert False, f"Server not running pid={context.server_process.pid} ..."
 
-        server_graceful_shutdown(context)
+        server_graceful_shutdown(context)  # SIGINT
 
-        # Wait few for socket to free up
-        time.sleep(0.05)
+        try:
+            context.server_process.wait(0.5)
+        except TimeoutExpired:
+            print(f"server still alive after 500ms, force-killing pid={context.server_process.pid} ...")
+            context.server_process.kill()  # SIGKILL
+            context.server_process.wait()
 
-        attempts = 0
-        while pid_exists(context.server_process.pid) or is_server_listening(context.server_fqdn, context.server_port):
-            server_kill(context)
+        while is_server_listening(context.server_fqdn, context.server_port):
             time.sleep(0.1)
-            attempts += 1
-            if attempts > 5:
-                server_kill_hard(context)
-    except:
-        exc = sys.exception()
-        print("error in after scenario: \n")
-        print(exc)
-        print("*** print_tb: \n")
-        traceback.print_tb(exc.__traceback__, file=sys.stdout)
+    except Exception:
+        print("ignoring error in after_scenario:")
+        traceback.print_exc(file=sys.stdout)
 
 
 def server_graceful_shutdown(context):
-    print(f"shutting down server pid={context.server_process.pid} ...\n")
+    print(f"shutting down server pid={context.server_process.pid} ...")
     if os.name == 'nt':
-        os.kill(context.server_process.pid, signal.CTRL_C_EVENT)
+        interrupt = signal.CTRL_C_EVENT
     else:
-        os.kill(context.server_process.pid, signal.SIGINT)
-
-
-def server_kill(context):
-    print(f"killing server pid={context.server_process.pid} ...\n")
-    context.server_process.kill()
-
-
-def server_kill_hard(context):
-    pid = context.server_process.pid
-    path = context.server_path
-
-    print(f"Server dangling exits, hard killing force {pid}={path}...\n")
-    try:
-        psutil.Process(pid).kill()
-    except psutil.NoSuchProcess:
-        return False
-    return True
+        interrupt = signal.SIGINT
+    context.server_process.send_signal(interrupt)
 
 
 def is_server_listening(server_fqdn, server_port):
     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
         result = sock.connect_ex((server_fqdn, server_port))
         _is_server_listening = result == 0
         if _is_server_listening:
-            print(f"server is listening on {server_fqdn}:{server_port}...\n")
+            print(f"server is listening on {server_fqdn}:{server_port}...")
         return _is_server_listening
-
-
-def pid_exists(pid):
-    try:
-        psutil.Process(pid)
-    except psutil.NoSuchProcess:
-        return False
-    return True
-
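The except: fix above deserves a standalone note: a bare except: also catches SystemExit and KeyboardInterrupt, so a failing cleanup could previously swallow an attempt to abort the test run. A tiny sketch of the replacement pattern, where the RuntimeError is a made-up stand-in for a real cleanup failure:

import sys
import traceback

try:
    raise RuntimeError("server cleanup failed")   # hypothetical failure
except Exception:                                 # lets SystemExit/KeyboardInterrupt propagate
    print("ignoring error in after_scenario:")
    traceback.print_exc(file=sys.stdout)          # replaces sys.exception() + print_tb()

traceback.print_exc() prints both the traceback and the exception message in one call.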

examples/server/tests/features/server.feature

+7 −7

@@ -35,9 +35,9 @@ Feature: llama.cpp server
     And metric llamacpp:tokens_predicted is <n_predicted>
 
     Examples: Prompts
-      | prompt | n_predict | re_content | n_prompt | n_predicted | truncated |
-      | I believe the meaning of life is | 8 | (read\|going)+ | 18 | 8 | not |
-      | Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids)+ | 46 | 64 | not |
+      | prompt | n_predict | re_content | n_prompt | n_predicted | truncated |
+      | I believe the meaning of life is | 8 | (read\|going)+ | 18 | 8 | not |
+      | Write a joke about AI from a very long prompt which will not be truncated | 256 | (princesses\|everyone\|kids\|Anna\|forest)+ | 46 | 64 | not |
 
   Scenario: Completion prompt truncated
     Given a prompt:
@@ -48,7 +48,7 @@ Feature: llama.cpp server
      Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
      """
     And a completion request with no api error
-    Then 64 tokens are predicted matching fun|Annaks|popcorns|pictry
+    Then 64 tokens are predicted matching fun|Annaks|popcorns|pictry|bowl
     And the completion is truncated
     And 109 prompt tokens are processed
 
@@ -65,9 +65,9 @@ Feature: llama.cpp server
     And the completion is <truncated> truncated
 
     Examples: Prompts
-      | model | system_prompt | user_prompt | max_tokens | re_content | n_prompt | n_predicted | enable_streaming | truncated |
-      | llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 77 | 8 | disabled | not |
-      | codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird)+ | -1 | 64 | enabled | |
+      | model | system_prompt | user_prompt | max_tokens | re_content | n_prompt | n_predicted | enable_streaming | truncated |
+      | llama-2 | Book | What is the best book | 8 | (Here\|what)+ | 77 | 8 | disabled | not |
+      | codellama70b | You are a coding assistant. | Write the fibonacci function in c++. | 128 | (thanks\|happy\|bird\|Annabyear)+ | -1 | 64 | enabled | |
 
 
   Scenario: Tokenize / Detokenize

examples/server/tests/features/steps/steps.py

+17 −16

@@ -66,7 +66,7 @@ def step_server_config(context, server_fqdn, server_port):
 def step_download_hf_model(context, hf_file, hf_repo):
     context.model_file = hf_hub_download(repo_id=hf_repo, filename=hf_file)
     if context.debug:
-        print(f"model file: {context.model_file}\n")
+        print(f"model file: {context.model_file}")
 
 
 @step('a model file {model_file}')
@@ -137,9 +137,12 @@ def step_start_server(context):
     if 'GITHUB_ACTIONS' in os.environ:
         max_attempts *= 2
 
+    addrs = socket.getaddrinfo(context.server_fqdn, context.server_port, type=socket.SOCK_STREAM)
+    family, typ, proto, _, sockaddr = addrs[0]
+
     while True:
-        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
-            result = sock.connect_ex((context.server_fqdn, context.server_port))
+        with closing(socket.socket(family, typ, proto)) as sock:
+            result = sock.connect_ex(sockaddr)
             if result == 0:
                 print("\x1b[33;46mserver started!\x1b[0m")
                 return
@@ -209,7 +212,7 @@ async def step_request_completion(context, api_error):
                                          user_api_key=context.user_api_key)
     context.tasks_result.append(completion)
     if context.debug:
-        print(f"Completion response: {completion}\n")
+        print(f"Completion response: {completion}")
     if expect_api_error:
         assert completion == 401, f"completion must be an 401 status code: {completion}"
 
@@ -354,7 +357,7 @@ def step_prompt_passkey(context, passkey, i_pos):
     prompt += context.prompt_junk_suffix
     if context.debug:
         passkey_highlight = "\x1b[33m" + passkey + "\x1b[0m"
-        print(f"Passkey challenge:\n```{prompt.replace(passkey, passkey_highlight)}```\n")
+        print(f"Passkey challenge:\n```{prompt.replace(passkey, passkey_highlight)}```")
     context.prompts.append(context.prompt_prefix + prompt + context.prompt_suffix)
     context.n_prompts = len(context.prompts)
 
@@ -363,7 +366,7 @@ def step_prompt_passkey(context, passkey, i_pos):
 @async_run_until_complete
 async def step_oai_chat_completions(context, api_error):
     if context.debug:
-        print(f"Submitting OAI compatible completions request...\n")
+        print(f"Submitting OAI compatible completions request...")
     expect_api_error = api_error == 'raised'
     completion = await oai_chat_completions(context.prompts.pop(),
                                             context.system_prompt,
@@ -508,12 +511,12 @@ async def step_all_embeddings_are_the_same(context):
             embedding1 = np.array(embeddings[i])
             embedding2 = np.array(embeddings[j])
             if context.debug:
-                print(f"embedding1: {embedding1[-8:]}\n")
-                print(f"embedding2: {embedding2[-8:]}\n")
+                print(f"embedding1: {embedding1[-8:]}")
+                print(f"embedding2: {embedding2[-8:]}")
             similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
             msg = f"Similarity between {i} and {j}: {similarity:.10f}"
             if context.debug:
-                print(f"{msg}\n")
+                print(f"{msg}")
             assert np.isclose(similarity, 1.0, rtol=1e-05, atol=1e-08, equal_nan=False), msg
 
@@ -630,7 +633,7 @@ async def step_prometheus_metrics_exported(context):
         metrics_raw = await metrics_response.text()
         metric_exported = False
         if context.debug:
-            print(f"/metrics answer:\n{metrics_raw}\n")
+            print(f"/metrics answer:\n{metrics_raw}")
         context.metrics = {}
         for metric in parser.text_string_to_metric_families(metrics_raw):
             match metric.name:
@@ -932,7 +935,7 @@ def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re
             last_match = end
     highlighted += content[last_match:]
     if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
-        print(f"Checking completion response: {highlighted}\n")
+        print(f"Checking completion response: {highlighted}")
     assert last_match > 0, f'/{re_content}/ must match ```{highlighted}```'
     if expected_predicted_n and expected_predicted_n > 0:
         assert n_predicted == expected_predicted_n, (f'invalid number of tokens predicted:'
@@ -942,7 +945,7 @@
 async def gather_tasks_results(context):
     n_tasks = len(context.concurrent_tasks)
     if context.debug:
-        print(f"Waiting for all {n_tasks} tasks results...\n")
+        print(f"Waiting for all {n_tasks} tasks results...")
     for task_no in range(n_tasks):
         context.tasks_result.append(await context.concurrent_tasks.pop())
     n_completions = len(context.tasks_result)
@@ -959,7 +962,7 @@ async def wait_for_health_status(context,
                                  slots_processing=None,
                                  expected_slots=None):
     if context.debug:
-        print(f"Starting checking for health for expected_health_status={expected_health_status}\n")
+        print(f"Starting checking for health for expected_health_status={expected_health_status}")
     interval = 0.5
     counter = 0
     if 'GITHUB_ACTIONS' in os.environ:
@@ -1048,8 +1051,6 @@ def start_server_background(context):
     if 'LLAMA_SERVER_BIN_PATH' in os.environ:
         context.server_path = os.environ['LLAMA_SERVER_BIN_PATH']
     server_listen_addr = context.server_fqdn
-    if os.name == 'nt':
-        server_listen_addr = '0.0.0.0'
     server_args = [
         '--host', server_listen_addr,
         '--port', context.server_port,
@@ -1088,7 +1089,7 @@
         server_args.append('--verbose')
     if 'SERVER_LOG_FORMAT_JSON' not in os.environ:
         server_args.extend(['--log-format', "text"])
-    print(f"starting server with: {context.server_path} {server_args}\n")
+    print(f"starting server with: {context.server_path} {server_args}")
     flags = 0
     if 'nt' == os.name:
         flags |= subprocess.DETACHED_PROCESS
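The step_start_server() hunk above is the dual-stack fix: instead of hard-coding AF_INET, the probe asks getaddrinfo() which address family and sockaddr to use, so a host name that resolves to ::1 no longer defeats the check. A standalone sketch of the same idea; the is_port_open() helper and the localhost:8080 target are illustrative, not part of the test suite.

import socket
from contextlib import closing

def is_port_open(host: str, port: int) -> bool:
    # Let getaddrinfo() pick the family (AF_INET or AF_INET6) for us.
    addrs = socket.getaddrinfo(host, port, type=socket.SOCK_STREAM)
    family, typ, proto, _, sockaddr = addrs[0]
    with closing(socket.socket(family, typ, proto)) as sock:
        return sock.connect_ex(sockaddr) == 0

print(is_port_open("localhost", 8080))

This is also why the Windows-only '0.0.0.0' listen-address hack in start_server_background() could be dropped: client and server now agree on the socket family.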

examples/server/tests/requirements.txt

+0 −1

@@ -3,5 +3,4 @@ behave~=1.2.6
 huggingface_hub~=0.20.3
 numpy~=1.24.4
 openai~=0.25.0
-psutil~=5.9.8
 prometheus-client~=0.20.0

0 commit comments