import requests
import aiohttp
import asyncio
import logging
import os
import sys
import time
from typing import List, Dict, Any
from contextlib import asynccontextmanager

from langchain_core.documents import Document

from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


class MistralLoader:
    """
    Enhanced Mistral OCR loader with both sync and async support.

    Loads documents by processing them through the Mistral OCR API.

    Performance Optimizations:
    - Differentiated timeouts for different operations
    - Intelligent retry logic with exponential backoff
    - Memory-efficient file streaming for large files
    - Connection pooling and keepalive optimization
    - Semaphore-based concurrency control for batch processing
    - Enhanced error handling with retryable error classification
    """

    BASE_API_URL = "https://api.mistral.ai/v1"

    def __init__(
        self,
        api_key: str,
        file_path: str,
        timeout: int = 300,  # 5 minutes default
        max_retries: int = 3,
        enable_debug_logging: bool = False,
    ):
        """
        Initializes the loader with enhanced features.

        Args:
            api_key: Your Mistral API key.
            file_path: The local path to the PDF file to process.
            timeout: Request timeout in seconds.
            max_retries: Maximum number of retry attempts.
            enable_debug_logging: Enable detailed debug logs.
        """
        if not api_key:
            raise ValueError("API key cannot be empty.")
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found at {file_path}")

        self.api_key = api_key
        self.file_path = file_path
        self.timeout = timeout
        self.max_retries = max_retries
        self.debug = enable_debug_logging

        # PERFORMANCE OPTIMIZATION: Differentiated timeouts for different operations.
        # This prevents long-running OCR operations from affecting quick operations
        # and improves user experience by failing fast on operations that should be quick.
        self.upload_timeout = min(
            timeout, 120
        )  # Cap upload at 2 minutes - prevents hanging on large files
        self.url_timeout = (
            30  # URL requests should be fast - fail quickly if API is slow
        )
        self.ocr_timeout = (
            timeout  # OCR can take the full timeout - this is the heavy operation
        )
        self.cleanup_timeout = (
            30  # Cleanup should be quick - don't hang on file deletion
        )

        # PERFORMANCE OPTIMIZATION: Pre-compute file info to avoid repeated filesystem calls.
        # This avoids multiple os.path.basename() and os.path.getsize() calls during processing.
        self.file_name = os.path.basename(file_path)
        self.file_size = os.path.getsize(file_path)

        # ENHANCEMENT: Added User-Agent for better API tracking and debugging.
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "User-Agent": "OpenWebUI-MistralLoader/2.0",  # Helps API provider track usage
        }

    def _debug_log(self, message: str, *args) -> None:
        """
        PERFORMANCE OPTIMIZATION: Conditional debug logging.

        Only processes debug messages when debug mode is enabled, avoiding
        string formatting overhead in production environments.
        """
        if self.debug:
            log.debug(message, *args)

    def _handle_response(self, response: requests.Response) -> Dict[str, Any]:
        """Checks response status and returns JSON content."""
        try:
            response.raise_for_status()  # Raises HTTPError for bad responses (4xx or 5xx)
            # Handle potential empty responses for certain successful requests (e.g., DELETE)
            if response.status_code == 204 or not response.content:
                return {}  # Return empty dict if no content
            return response.json()
        except requests.exceptions.HTTPError as http_err:
            log.error(f"HTTP error occurred: {http_err} - Response: {response.text}")
            raise
        except requests.exceptions.RequestException as req_err:
            log.error(f"Request exception occurred: {req_err}")
            raise
        except ValueError as json_err:  # Includes JSONDecodeError
            log.error(f"JSON decode error: {json_err} - Response: {response.text}")
            raise  # Re-raise after logging

    async def _handle_response_async(
        self, response: aiohttp.ClientResponse
    ) -> Dict[str, Any]:
        """Async version of response handling with better error info."""
        try:
            response.raise_for_status()

            # Check content type
            content_type = response.headers.get("content-type", "")
            if "application/json" not in content_type:
                if response.status == 204:
                    return {}
                text = await response.text()
                raise ValueError(
                    f"Unexpected content type: {content_type}, body: {text[:200]}..."
                )

            return await response.json()

        except aiohttp.ClientResponseError as e:
            error_text = await response.text() if response else "No response"
            log.error(f"HTTP {e.status}: {e.message} - Response: {error_text[:500]}")
            raise
        except aiohttp.ClientError as e:
            log.error(f"Client error: {e}")
            raise
        except Exception as e:
            log.error(f"Unexpected error processing response: {e}")
            raise

    def _is_retryable_error(self, error: Exception) -> bool:
        """
        ENHANCEMENT: Intelligent error classification for retry logic.

        Determines if an error is retryable based on its type and status code.
        This prevents wasting time retrying errors that will never succeed
        (like authentication errors) while ensuring transient errors are retried.

        Retryable errors:
        - Network connection errors (temporary network issues)
        - Timeouts (server might be temporarily overloaded)
        - Server errors (5xx status codes - server-side issues)
        - Rate limiting (429 status - temporary throttling)

        Non-retryable errors:
        - Authentication errors (401, 403 - won't fix with retry)
        - Bad request errors (400 - malformed request)
        - Not found errors (404 - resource doesn't exist)
        """
        if isinstance(error, requests.exceptions.ConnectionError):
            return True  # Network issues are usually temporary
        if isinstance(error, requests.exceptions.Timeout):
            return True  # Timeouts might resolve on retry
        if isinstance(error, requests.exceptions.HTTPError):
            # Only retry on server errors (5xx) or rate limits (429)
            if hasattr(error, "response") and error.response is not None:
                status_code = error.response.status_code
                return status_code >= 500 or status_code == 429
            return False
        if isinstance(
            error, (aiohttp.ClientConnectionError, aiohttp.ServerTimeoutError)
        ):
            return True  # Async network/timeout errors are retryable
        if isinstance(error, aiohttp.ClientResponseError):
            return error.status >= 500 or error.status == 429
        return False  # All other errors are non-retryable

    def _retry_request_sync(self, request_func, *args, **kwargs):
        """
        ENHANCEMENT: Synchronous retry logic with intelligent error classification.

        Uses exponential backoff (2**attempt plus a small constant offset). The wait
        time increases exponentially but is capped at 30 seconds to prevent excessive
        delays. Only retries errors that are likely to succeed on subsequent attempts.
        """
        for attempt in range(self.max_retries):
            try:
                return request_func(*args, **kwargs)
            except Exception as e:
                if attempt == self.max_retries - 1 or not self._is_retryable_error(e):
                    raise

                # PERFORMANCE OPTIMIZATION: Exponential backoff with cap
                # Prevents overwhelming the server while ensuring reasonable retry delays
                wait_time = min((2**attempt) + 0.5, 30)  # Cap at 30 seconds
                log.warning(
                    f"Retryable error (attempt {attempt + 1}/{self.max_retries}): {e}. "
                    f"Retrying in {wait_time}s..."
                )
                time.sleep(wait_time)

    async def _retry_request_async(self, request_func, *args, **kwargs):
        """
        ENHANCEMENT: Async retry logic with intelligent error classification.

        Async version of the retry logic that doesn't block the event loop during
        wait periods. Uses the same exponential backoff strategy as the sync version.
        """
        for attempt in range(self.max_retries):
            try:
                return await request_func(*args, **kwargs)
            except Exception as e:
                if attempt == self.max_retries - 1 or not self._is_retryable_error(e):
                    raise

                # PERFORMANCE OPTIMIZATION: Non-blocking exponential backoff
                wait_time = min((2**attempt) + 0.5, 30)  # Cap at 30 seconds
                log.warning(
                    f"Retryable error (attempt {attempt + 1}/{self.max_retries}): {e}. "
                    f"Retrying in {wait_time}s..."
                )
                await asyncio.sleep(wait_time)  # Non-blocking wait
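
    # Backoff schedule for the retry helpers above (illustrative, with the default
    # max_retries=3): attempt 0 fails -> wait 1.5s, attempt 1 fails -> wait 2.5s,
    # attempt 2 raises. min((2**attempt) + 0.5, 30) caps the delay at 30 seconds.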

    def _upload_file(self) -> str:
        """
        Uploads the file to Mistral for OCR processing (sync version).

        Uses a context manager for file handling to ensure proper resource cleanup.
        Although streaming is not enabled for this endpoint, the file is opened
        in a context manager to minimize memory usage duration.
        """
        log.info("Uploading file to Mistral API")
        url = f"{self.BASE_API_URL}/files"

        def upload_request():
            # MEMORY OPTIMIZATION: Use context manager to minimize file handle lifetime.
            # This ensures the file is closed immediately after reading, reducing memory usage.
            with open(self.file_path, "rb") as f:
                files = {"file": (self.file_name, f, "application/pdf")}
                data = {"purpose": "ocr"}

                # NOTE: stream=False is required for this endpoint;
                # the Mistral API doesn't support chunked uploads here.
                response = requests.post(
                    url,
                    headers=self.headers,
                    files=files,
                    data=data,
                    timeout=self.upload_timeout,  # Use specialized upload timeout
                    stream=False,  # Keep as False for this endpoint
                )

            return self._handle_response(response)

        try:
            response_data = self._retry_request_sync(upload_request)
            file_id = response_data.get("id")
            if not file_id:
                raise ValueError("File ID not found in upload response.")
            log.info(f"File uploaded successfully. File ID: {file_id}")
            return file_id
        except Exception as e:
            log.error(f"Failed to upload file: {e}")
            raise

    async def _upload_file_async(self, session: aiohttp.ClientSession) -> str:
        """Async file upload with streaming for better memory efficiency."""
        url = f"{self.BASE_API_URL}/files"

        async def upload_request():
            # Create multipart writer for streaming upload
            writer = aiohttp.MultipartWriter("form-data")

            # Add purpose field
            purpose_part = writer.append("ocr")
            purpose_part.set_content_disposition("form-data", name="purpose")

            # Add file part with streaming: appending an open binary file object
            # lets aiohttp stream it from disk rather than loading it into memory.
            file_part = writer.append(
                open(self.file_path, "rb"),
                {"Content-Type": "application/pdf"},
            )
            file_part.set_content_disposition(
                "form-data", name="file", filename=self.file_name
            )

            self._debug_log(
                f"Uploading file: {self.file_name} ({self.file_size:,} bytes)"
            )

            async with session.post(
                url,
                data=writer,
                headers=self.headers,
                timeout=aiohttp.ClientTimeout(total=self.upload_timeout),
            ) as response:
                return await self._handle_response_async(response)

        response_data = await self._retry_request_async(upload_request)

        file_id = response_data.get("id")
        if not file_id:
            raise ValueError("File ID not found in upload response.")

        log.info(f"File uploaded successfully. File ID: {file_id}")
        return file_id

    def _get_signed_url(self, file_id: str) -> str:
        """Retrieves a temporary signed URL for the uploaded file (sync version)."""
        log.info(f"Getting signed URL for file ID: {file_id}")
        url = f"{self.BASE_API_URL}/files/{file_id}/url"
        params = {"expiry": 1}
        signed_url_headers = {**self.headers, "Accept": "application/json"}

        def url_request():
            response = requests.get(
                url, headers=signed_url_headers, params=params, timeout=self.url_timeout
            )
            return self._handle_response(response)

        try:
            response_data = self._retry_request_sync(url_request)
            signed_url = response_data.get("url")
            if not signed_url:
                raise ValueError("Signed URL not found in response.")
            log.info("Signed URL received.")
            return signed_url
        except Exception as e:
            log.error(f"Failed to get signed URL: {e}")
            raise

    async def _get_signed_url_async(
        self, session: aiohttp.ClientSession, file_id: str
    ) -> str:
        """Async signed URL retrieval."""
        url = f"{self.BASE_API_URL}/files/{file_id}/url"
        params = {"expiry": 1}
        headers = {**self.headers, "Accept": "application/json"}

        async def url_request():
            self._debug_log(f"Getting signed URL for file ID: {file_id}")
            async with session.get(
                url,
                headers=headers,
                params=params,
                timeout=aiohttp.ClientTimeout(total=self.url_timeout),
            ) as response:
                return await self._handle_response_async(response)

        response_data = await self._retry_request_async(url_request)

        signed_url = response_data.get("url")
        if not signed_url:
            raise ValueError("Signed URL not found in response.")

        self._debug_log("Signed URL received successfully")
        return signed_url

    def _process_ocr(self, signed_url: str) -> Dict[str, Any]:
        """Sends the signed URL to the OCR endpoint for processing (sync version)."""
        log.info("Processing OCR via Mistral API")
        url = f"{self.BASE_API_URL}/ocr"
        ocr_headers = {
            **self.headers,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        payload = {
            "model": "mistral-ocr-latest",
            "document": {
                "type": "document_url",
                "document_url": signed_url,
            },
            "include_image_base64": False,
        }

        def ocr_request():
            response = requests.post(
                url, headers=ocr_headers, json=payload, timeout=self.ocr_timeout
            )
            return self._handle_response(response)

        try:
            ocr_response = self._retry_request_sync(ocr_request)
            log.info("OCR processing done.")
            self._debug_log("OCR response: %s", ocr_response)
            return ocr_response
        except Exception as e:
            log.error(f"Failed during OCR processing: {e}")
            raise

    async def _process_ocr_async(
        self, session: aiohttp.ClientSession, signed_url: str
    ) -> Dict[str, Any]:
        """Async OCR processing with timing metrics."""
        url = f"{self.BASE_API_URL}/ocr"

        headers = {
            **self.headers,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

        payload = {
            "model": "mistral-ocr-latest",
            "document": {
                "type": "document_url",
                "document_url": signed_url,
            },
            "include_image_base64": False,
        }

        async def ocr_request():
            log.info("Starting OCR processing via Mistral API")
            start_time = time.time()

            async with session.post(
                url,
                json=payload,
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=self.ocr_timeout),
            ) as response:
                ocr_response = await self._handle_response_async(response)

            processing_time = time.time() - start_time
            log.info(f"OCR processing completed in {processing_time:.2f}s")

            return ocr_response

        return await self._retry_request_async(ocr_request)

    def _delete_file(self, file_id: str) -> None:
        """Deletes the file from Mistral storage (sync version)."""
        log.info(f"Deleting uploaded file ID: {file_id}")
        url = f"{self.BASE_API_URL}/files/{file_id}"

        try:
            response = requests.delete(
                url, headers=self.headers, timeout=self.cleanup_timeout
            )
            delete_response = self._handle_response(response)
            log.info(f"File deleted successfully: {delete_response}")
        except Exception as e:
            # Log error but don't necessarily halt execution if deletion fails
            log.error(f"Failed to delete file ID {file_id}: {e}")

    async def _delete_file_async(
        self, session: aiohttp.ClientSession, file_id: str
    ) -> None:
        """Async file deletion with error tolerance."""
        try:

            async def delete_request():
                self._debug_log(f"Deleting file ID: {file_id}")
                async with session.delete(
                    url=f"{self.BASE_API_URL}/files/{file_id}",
                    headers=self.headers,
                    timeout=aiohttp.ClientTimeout(
                        total=self.cleanup_timeout
                    ),  # Shorter timeout for cleanup
                ) as response:
                    return await self._handle_response_async(response)

            await self._retry_request_async(delete_request)
            self._debug_log(f"File {file_id} deleted successfully")

        except Exception as e:
            # Don't fail the entire process if cleanup fails
            log.warning(f"Failed to delete file ID {file_id}: {e}")

    @asynccontextmanager
    async def _get_session(self):
        """Context manager for HTTP session with optimized settings."""
        connector = aiohttp.TCPConnector(
            limit=20,  # Increased total connection limit for better throughput
            limit_per_host=10,  # Increased per-host limit for API endpoints
            ttl_dns_cache=600,  # Longer DNS cache TTL (10 minutes)
            use_dns_cache=True,
            keepalive_timeout=60,  # Increased keepalive for connection reuse
            enable_cleanup_closed=True,
            force_close=False,  # Allow connection reuse
            resolver=aiohttp.AsyncResolver(),  # Use async DNS resolver
        )

        timeout = aiohttp.ClientTimeout(
            total=self.timeout,
            connect=30,  # Connection timeout
            sock_read=60,  # Socket read timeout
        )

        async with aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers={"User-Agent": "OpenWebUI-MistralLoader/2.0"},
            raise_for_status=False,  # We handle status codes manually
        ) as session:
            yield session

    def _process_results(self, ocr_response: Dict[str, Any]) -> List[Document]:
        """Process OCR results into Document objects with enhanced metadata and memory efficiency."""
        pages_data = ocr_response.get("pages")
        if not pages_data:
            log.warning("No pages found in OCR response.")
            return [
                Document(
                    page_content="No text content found",
                    metadata={"error": "no_pages", "file_name": self.file_name},
                )
            ]

        documents = []
        total_pages = len(pages_data)
        skipped_pages = 0

        # Process pages in a memory-efficient way
        for page_data in pages_data:
            page_content = page_data.get("markdown")
            page_index = page_data.get("index")  # API uses 0-based index

            if page_content is None or page_index is None:
                skipped_pages += 1
                self._debug_log(
                    f"Skipping page due to missing 'markdown' or 'index'. Data keys: {list(page_data.keys())}"
                )
                continue

            # Clean up content efficiently with early exit for empty content
            if isinstance(page_content, str):
                cleaned_content = page_content.strip()
            else:
                cleaned_content = str(page_content).strip()

            if not cleaned_content:
                skipped_pages += 1
                self._debug_log(f"Skipping empty page {page_index}")
                continue

            # Create document with optimized metadata
            documents.append(
                Document(
                    page_content=cleaned_content,
                    metadata={
                        "page": page_index,  # 0-based index from API
                        "page_label": page_index + 1,  # 1-based label for convenience
                        "total_pages": total_pages,
                        "file_name": self.file_name,
                        "file_size": self.file_size,
                        "processing_engine": "mistral-ocr",
                        "content_length": len(cleaned_content),
                    },
                )
            )

        if skipped_pages > 0:
            log.info(
                f"Processed {len(documents)} pages, skipped {skipped_pages} empty/invalid pages"
            )

        if not documents:
            # Case where pages existed but none had valid markdown/index
            log.warning(
                "OCR response contained pages, but none had valid content/index."
            )
            return [
                Document(
                    page_content="No valid text content found in document",
                    metadata={
                        "error": "no_valid_pages",
                        "total_pages": total_pages,
                        "file_name": self.file_name,
                    },
                )
            ]

        return documents
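
    # Shape of the per-page metadata produced above (values illustrative for a
    # hypothetical 3-page "example.pdf"):
    #   {"page": 0, "page_label": 1, "total_pages": 3, "file_name": "example.pdf",
    #    "file_size": 123456, "processing_engine": "mistral-ocr", "content_length": 842}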

    def load(self) -> List[Document]:
        """
        Executes the full OCR workflow: upload, get URL, process OCR, delete file.
        Synchronous version for backward compatibility.

        Returns:
            A list of Document objects, one for each page processed.
        """
        file_id = None
        start_time = time.time()

        try:
            # 1. Upload file
            file_id = self._upload_file()

            # 2. Get Signed URL
            signed_url = self._get_signed_url(file_id)

            # 3. Process OCR
            ocr_response = self._process_ocr(signed_url)

            # 4. Process results
            documents = self._process_results(ocr_response)

            total_time = time.time() - start_time
            log.info(
                f"Sync OCR workflow completed in {total_time:.2f}s, produced {len(documents)} documents"
            )

            return documents

        except Exception as e:
            total_time = time.time() - start_time
            log.error(
                f"An error occurred during the loading process after {total_time:.2f}s: {e}"
            )
            # Return an error document on failure
            return [
                Document(
                    page_content=f"Error during processing: {e}",
                    metadata={
                        "error": "processing_failed",
                        "file_name": self.file_name,
                    },
                )
            ]
        finally:
            # 5. Delete file (attempt even if prior steps failed after upload)
            if file_id:
                try:
                    self._delete_file(file_id)
                except Exception as del_e:
                    # Log deletion error, but don't overwrite original error if one occurred
                    log.error(
                        f"Cleanup error: Could not delete file ID {file_id}. Reason: {del_e}"
                    )

    async def load_async(self) -> List[Document]:
        """
        Asynchronous OCR workflow execution with optimized performance.

        Returns:
            A list of Document objects, one for each page processed.
        """
        file_id = None
        start_time = time.time()

        try:
            async with self._get_session() as session:
                # 1. Upload file with streaming
                file_id = await self._upload_file_async(session)

                # 2. Get signed URL
                signed_url = await self._get_signed_url_async(session, file_id)

                # 3. Process OCR
                ocr_response = await self._process_ocr_async(session, signed_url)

                # 4. Process results
                documents = self._process_results(ocr_response)

                total_time = time.time() - start_time
                log.info(
                    f"Async OCR workflow completed in {total_time:.2f}s, produced {len(documents)} documents"
                )

                return documents

        except Exception as e:
            total_time = time.time() - start_time
            log.error(f"Async OCR workflow failed after {total_time:.2f}s: {e}")
            return [
                Document(
                    page_content=f"Error during OCR processing: {e}",
                    metadata={
                        "error": "processing_failed",
                        "file_name": self.file_name,
                    },
                )
            ]
        finally:
            # 5. Cleanup - always attempt file deletion
            if file_id:
                try:
                    async with self._get_session() as session:
                        await self._delete_file_async(session, file_id)
                except Exception as cleanup_error:
                    log.error(f"Cleanup failed for file ID {file_id}: {cleanup_error}")

    @staticmethod
    async def load_multiple_async(
        loaders: List["MistralLoader"],
        max_concurrent: int = 5,  # Limit concurrent requests
    ) -> List[List[Document]]:
        """
        Process multiple files concurrently with controlled concurrency.

        Args:
            loaders: List of MistralLoader instances
            max_concurrent: Maximum number of concurrent requests

        Returns:
            List of document lists, one for each loader
        """
        if not loaders:
            return []

        log.info(
            f"Starting concurrent processing of {len(loaders)} files with max {max_concurrent} concurrent"
        )
        start_time = time.time()

        # Use semaphore to control concurrency
        semaphore = asyncio.Semaphore(max_concurrent)

        async def process_with_semaphore(loader: "MistralLoader") -> List[Document]:
            async with semaphore:
                return await loader.load_async()

        # Process all files with controlled concurrency
        tasks = [process_with_semaphore(loader) for loader in loaders]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Handle any exceptions in results
        processed_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                log.error(f"File {i} failed: {result}")
                processed_results.append(
                    [
                        Document(
                            page_content=f"Error processing file: {result}",
                            metadata={
                                "error": "batch_processing_failed",
                                "file_index": i,
                            },
                        )
                    ]
                )
            else:
                processed_results.append(result)

        # MONITORING: Log comprehensive batch processing statistics
        total_time = time.time() - start_time
        total_docs = sum(len(docs) for docs in processed_results)
        success_count = sum(
            1 for result in results if not isinstance(result, Exception)
        )
        failure_count = len(results) - success_count

        log.info(
            f"Batch processing completed in {total_time:.2f}s: "
            f"{success_count} files succeeded, {failure_count} files failed, "
            f"produced {total_docs} total documents"
        )

        return processed_results
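

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the loader). It assumes a
# MISTRAL_API_KEY environment variable and local PDF paths that you would
# replace with your own; it only demonstrates the sync and batch entry points
# defined above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    api_key = os.environ.get("MISTRAL_API_KEY", "")

    # Single file, synchronous workflow.
    sync_docs = MistralLoader(api_key=api_key, file_path="./sample.pdf").load()
    print(f"Sync load produced {len(sync_docs)} documents")

    # Batch processing: at most two OCR workflows run concurrently.
    loaders = [
        MistralLoader(api_key=api_key, file_path="./a.pdf"),
        MistralLoader(api_key=api_key, file_path="./b.pdf"),
    ]
    batch_docs = asyncio.run(
        MistralLoader.load_multiple_async(loaders, max_concurrent=2)
    )
    print(f"Batch produced {[len(d) for d in batch_docs]} documents per file")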