A powerful async-first localization engine that supports various content types including plain text, objects, chat sequences, and HTML documents.
- Async-first design for high-performance concurrent translations
- Concurrent processing for dramatically faster bulk translations
- Multiple content types: text, objects, chat messages, and more
- Auto-detection of source languages
- Fast mode for quick translations
- Flexible configuration with progress callbacks
- Context manager support for proper resource management
The async implementation provides significant performance improvements:
- Concurrent chunk processing for large payloads
- Batch operations for multiple translations
- Parallel API requests instead of sequential ones (see the sketch after this list)
- Better resource management with httpx
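For example, several independent requests can run in parallel with `asyncio.gather`. A minimal sketch using the engine methods shown later in this README:

```python
import asyncio
from lingodotdev import LingoDotDevEngine

async def parallel_requests():
    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        # Three independent API calls run concurrently instead of sequentially
        greeting_es, greeting_fr, detected = await asyncio.gather(
            engine.localize_text("Hello", {"target_locale": "es"}),
            engine.localize_text("Hello", {"target_locale": "fr"}),
            engine.recognize_locale("Bonjour le monde"),
        )

asyncio.run(parallel_requests())
```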
```bash
pip install lingodotdev
```

```python
import asyncio
from lingodotdev import LingoDotDevEngine

async def main():
    # Quick one-off translation (handles context management automatically)
    result = await LingoDotDevEngine.quick_translate(
        "Hello, world!",
        api_key="your-api-key",
        target_locale="es"
    )
    print(result)  # "¡Hola, mundo!"

asyncio.run(main())
```

```python
import asyncio
from lingodotdev import LingoDotDevEngine

async def main():
    config = {
        "api_key": "your-api-key",
        "api_url": "https://engine.lingo.dev"  # Optional, defaults to this
    }

    async with LingoDotDevEngine(config) as engine:
        # Translate text
        text_result = await engine.localize_text(
            "Hello, world!",
            {"target_locale": "es"}
        )

        # Translate object with concurrent processing
        obj_result = await engine.localize_object(
            {
                "greeting": "Hello",
                "farewell": "Goodbye",
                "question": "How are you?"
            },
            {"target_locale": "es"},
            concurrent=True  # Process chunks concurrently for speed
        )

asyncio.run(main())
```

```python
async def batch_example():
    # Translate to multiple languages at once
    results = await LingoDotDevEngine.quick_batch_translate(
        "Welcome to our application",
        api_key="your-api-key",
        target_locales=["es", "fr", "de", "it"]
    )
    # Results: ["Bienvenido...", "Bienvenue...", "Willkommen...", "Benvenuto..."]
```

```python
async def progress_example():
    def progress_callback(progress, source_chunk, processed_chunk):
        print(f"Progress: {progress}% - Processed {len(processed_chunk)} items")

    large_content = {f"item_{i}": f"Content {i}" for i in range(1000)}

    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        result = await engine.localize_object(
            large_content,
            {"target_locale": "es"},
            progress_callback=progress_callback  # Not available with concurrent=True
        )
```

```python
async def chat_example():
    chat_messages = [
        {"name": "Alice", "text": "Hello everyone!"},
        {"name": "Bob", "text": "How is everyone doing?"},
        {"name": "Charlie", "text": "Great to see you all!"}
    ]

    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        translated_chat = await engine.localize_chat(
            chat_messages,
            {"source_locale": "en", "target_locale": "es"}
        )
        # Names preserved, text translated
```

```python
async def concurrent_objects_example():
    objects = [
        {"title": "Welcome", "description": "Please sign in"},
        {"error": "Invalid input", "help": "Check your email"},
        {"success": "Account created", "next": "Continue to dashboard"}
    ]

    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        results = await engine.batch_localize_objects(
            objects,
            {"target_locale": "fr"}
        )
        # All objects translated concurrently
```

```python
async def detection_example():
    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        detected = await engine.recognize_locale("Bonjour le monde")
        print(detected)  # "fr"
```

```python
config = {
    "api_key": "your-api-key",              # Required: Your API key
    "api_url": "https://engine.lingo.dev",  # Optional: API endpoint
    "batch_size": 25,                       # Optional: Items per batch (1-250)
    "ideal_batch_item_size": 250,           # Optional: Target words per batch (1-2500)
    "retry_max_attempts": 3,                # Optional: Max retry attempts (0-10, 0 = disabled)
    "retry_base_delay": 1.0,                # Optional: Base delay between retries (0.1-10.0 s)
    "retry_max_timeout": 60.0               # Optional: Total timeout for all retries (1-300 s)
}
```

The SDK automatically handles transient failures with intelligent exponential backoff:
- Retries: 5xx server errors, 429 rate limits, and network timeouts
- No retries: 4xx client errors (except 429)
- Exponential backoff: `base_delay * (2^attempt) + jitter` (see the sketch below)
- Rate limiting: Respects `Retry-After` headers from 429 responses
- Timeout protection: Stops retrying if the total time would exceed `retry_max_timeout`
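A small illustrative sketch of the resulting backoff schedule (not the SDK's internal code; the jitter range here is an assumption):

```python
import random

def backoff_delay(attempt: int, base_delay: float = 1.0, jitter: float = 0.5) -> float:
    # base_delay * (2^attempt) plus random jitter, as described above
    return base_delay * (2 ** attempt) + random.uniform(0, jitter)

# With retry_base_delay=1.0, successive waits are roughly 1s, 2s, 4s (plus jitter),
# until retry_max_attempts or retry_max_timeout is reached.
for attempt in range(3):
    print(f"attempt {attempt}: wait ~{backoff_delay(attempt):.2f}s")
```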
```python
# Custom retry configuration
from lingodotdev import EngineConfig

config = EngineConfig(
    api_key="your-api-key",
    retry_max_attempts=5,    # More aggressive retrying
    retry_base_delay=0.5,    # Faster initial retry
    retry_max_timeout=30.0   # Shorter total timeout
)

async def custom_retry_example():
    async with LingoDotDevEngine(config) as engine:
        result = await engine.localize_text("Hello", {"target_locale": "es"})

# Disable retries completely
config = EngineConfig(api_key="your-api-key", retry_max_attempts=0)
```

- `source_locale`: Source language code (auto-detected if `None`)
- `target_locale`: Target language code (required)
- `fast`: Enable fast mode for quicker translations
- `reference`: Reference translations for context
- `concurrent`: Process chunks in parallel (faster, but progress callbacks are disabled)
- `progress_callback`: Function to track progress (disabled with `concurrent=True`); see the example below
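A minimal sketch combining several of these parameters (the `reference` value is omitted here, since its shape depends on your content):

```python
async def params_example():
    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        result = await engine.localize_text(
            "Hello, world!",
            {
                "source_locale": "en",  # or None to auto-detect
                "target_locale": "es",  # required
                "fast": True            # quicker translations
            }
        )
        # concurrent and progress_callback are passed as keyword arguments to
        # localize_object, as shown in the examples above.
```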
```python
async def error_handling_example():
    try:
        async with LingoDotDevEngine({"api_key": "invalid-key"}) as engine:
            result = await engine.localize_text("Hello", {"target_locale": "es"})
    except ValueError as e:
        print(f"Invalid request: {e}")
    except RuntimeError as e:
        print(f"API error: {e}")
```

- Use `concurrent=True` for large objects or multiple chunks
- Use `batch_localize_objects()` for multiple objects
- Use context managers for multiple operations
- Use `quick_translate()` for one-off translations
- Adjust `batch_size` based on your content structure
The async version is a drop-in replacement with these changes:
- Add `async`/`await` to all method calls
- Use `async with` for context managers
- All methods now return awaitable coroutines (see the before/after sketch below)
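A before/after sketch, assuming the previous synchronous API used the same method and parameter names:

```python
# Before (synchronous, assumed shape):
# with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
#     result = engine.localize_text("Hello", {"target_locale": "es"})

# After (async):
async def migrated_example():
    async with LingoDotDevEngine({"api_key": "your-api-key"}) as engine:
        result = await engine.localize_text("Hello", {"target_locale": "es"})
```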
- `localize_text(text, params)` - Translate text strings
- `localize_object(obj, params)` - Translate dictionary objects
- `localize_chat(chat, params)` - Translate chat messages
- `batch_localize_text(text, params)` - Translate to multiple languages
- `batch_localize_objects(objects, params)` - Translate multiple objects
- `recognize_locale(text)` - Detect language
- `whoami()` - Get API account info
- `quick_translate(content, api_key, target_locale, ...)` - One-off translation
- `quick_batch_translate(content, api_key, target_locales, ...)` - Batch translation
Apache-2.0 License
- Documentation
- Issues
- Community