Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit d29b81a

Browse files
committed
fix(examples): use openai_embed.func to prevent double EmbeddingFunc wrapping
openai_embed is an EmbeddingFunc instance (decorated with @wrap_embedding_func_with_attrs). Wrapping it inside a lambda and passing it to another EmbeddingFunc causes double invocation of EmbeddingFunc.__call__, which doubles the returned vector count and raises "Vector count mismatch: expected N vectors but got 2N vectors". Using openai_embed.func accesses the unwrapped async function directly, as documented in lightrag-hku's own codebase. Fixes the root cause of HKUDS/LightRAG#2549.
1 parent 5c53c4c commit d29b81a

5 files changed

Lines changed: 11 additions & 11 deletions

File tree

README.md

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -409,7 +409,7 @@ async def main():
409409
embedding_func = EmbeddingFunc(
410410
embedding_dim=3072,
411411
max_token_size=8192,
412-
func=lambda texts: openai_embed(
412+
func=lambda texts: openai_embed.func(
413413
texts,
414414
model="text-embedding-3-large",
415415
api_key=api_key,
@@ -485,7 +485,7 @@ async def process_multimodal_content():
485485
embedding_func=EmbeddingFunc(
486486
embedding_dim=3072,
487487
max_token_size=8192,
488-
func=lambda texts: openai_embed(
488+
func=lambda texts: openai_embed.func(
489489
texts,
490490
model="text-embedding-3-large",
491491
api_key=api_key,
@@ -711,7 +711,7 @@ async def load_existing_lightrag():
711711
embedding_func=EmbeddingFunc(
712712
embedding_dim=3072,
713713
max_token_size=8192,
714-
func=lambda texts: openai_embed(
714+
func=lambda texts: openai_embed.func(
715715
texts,
716716
model="text-embedding-3-large",
717717
api_key=api_key,
@@ -874,7 +874,7 @@ async def insert_content_list_example():
874874
embedding_func = EmbeddingFunc(
875875
embedding_dim=3072,
876876
max_token_size=8192,
877-
func=lambda texts: openai_embed(
877+
func=lambda texts: openai_embed.func(
878878
texts,
879879
model="text-embedding-3-large",
880880
api_key=api_key,

README_zh.md

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -389,7 +389,7 @@ async def main():
389389
embedding_func = EmbeddingFunc(
390390
embedding_dim=3072,
391391
max_token_size=8192,
392-
func=lambda texts: openai_embed(
392+
func=lambda texts: openai_embed.func(
393393
texts,
394394
model="text-embedding-3-large",
395395
api_key=api_key,
@@ -467,7 +467,7 @@ async def process_multimodal_content():
467467
embedding_func=EmbeddingFunc(
468468
embedding_dim=3072,
469469
max_token_size=8192,
470-
func=lambda texts: openai_embed(
470+
func=lambda texts: openai_embed.func(
471471
texts,
472472
model="text-embedding-3-large",
473473
api_key=api_key,
@@ -692,7 +692,7 @@ async def load_existing_lightrag():
692692
embedding_func=EmbeddingFunc(
693693
embedding_dim=3072,
694694
max_token_size=8192,
695-
func=lambda texts: openai_embed(
695+
func=lambda texts: openai_embed.func(
696696
texts,
697697
model="text-embedding-3-large",
698698
api_key=api_key,
@@ -855,7 +855,7 @@ async def insert_content_list_example():
855855
embedding_func = EmbeddingFunc(
856856
embedding_dim=3072,
857857
max_token_size=8192,
858-
func=lambda texts: openai_embed(
858+
func=lambda texts: openai_embed.func(
859859
texts,
860860
model="text-embedding-3-large",
861861
api_key=api_key,

examples/insert_content_list_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -252,7 +252,7 @@ def vision_model_func(
252252
embedding_func = EmbeddingFunc(
253253
embedding_dim=embedding_dim,
254254
max_token_size=8192,
255-
func=lambda texts: openai_embed(
255+
func=lambda texts: openai_embed.func(
256256
texts,
257257
model=embedding_model,
258258
api_key=api_key,

examples/modalprocessors_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -175,7 +175,7 @@ async def initialize_rag(api_key: str, base_url: str = None):
175175
embedding_func=EmbeddingFunc(
176176
embedding_dim=embedding_dim,
177177
max_token_size=8192,
178-
func=lambda texts: openai_embed(
178+
func=lambda texts: openai_embed.func(
179179
texts,
180180
model=embedding_model,
181181
api_key=api_key,

examples/raganything_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -189,7 +189,7 @@ def vision_model_func(
189189
embedding_func = EmbeddingFunc(
190190
embedding_dim=embedding_dim,
191191
max_token_size=8192,
192-
func=lambda texts: openai_embed(
192+
func=lambda texts: openai_embed.func(
193193
texts,
194194
model=embedding_model,
195195
api_key=api_key,

0 commit comments

Comments
 (0)