diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e04470d0e..17ed1a98e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,60 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.6.31] - 2025-09-25 + +### Added + +- 🔌 MCP (streamable HTTP) server support was added alongside existing OpenAPI server integration, allowing users to connect both server types through an improved server configuration interface. [#15932](https://github.com/open-webui/open-webui/issues/15932) [#16651](https://github.com/open-webui/open-webui/pull/16651), [Commit](https://github.com/open-webui/open-webui/commit/fd7385c3921eb59af76a26f4c475aedb38ce2406), [Commit](https://github.com/open-webui/open-webui/commit/777e81f7a8aca957a359d51df8388e5af4721a68), [Commit](https://github.com/open-webui/open-webui/commit/de7f7b3d855641450f8e5aac34fbae0665e0b80e), [Commit](https://github.com/open-webui/open-webui/commit/f1bbf3a91e4713039364b790e886e59b401572d0), [Commit](https://github.com/open-webui/open-webui/commit/c55afc42559c32a6f0c8beb0f1bb18e9360ab8af), [Commit](https://github.com/open-webui/open-webui/commit/61f20acf61f4fe30c0e5b0180949f6e1a8cf6524) +- 🔐 To enable MCP server authentication, OAuth 2.1 dynamic client registration was implemented with secure automatic client registration, encrypted session management, and seamless authentication flows. 
[Commit](https://github.com/open-webui/open-webui/commit/972be4eda5a394c111e849075f94099c9c0dd9aa), [Commit](https://github.com/open-webui/open-webui/commit/77e971dd9fbeee806e2864e686df5ec75e82104b), [Commit](https://github.com/open-webui/open-webui/commit/879abd7feea3692a2f157da4a458d30f27217508), [Commit](https://github.com/open-webui/open-webui/commit/422d38fd114b1ebd8a7dbb114d64e14791e67d7a), [Docs:#709](https://github.com/open-webui/docs/pull/709) +- 🛠️ External & Built-In Tools can now support rich UI element embedding ([Docs](https://docs.openwebui.com/features/plugin/tools/development)), allowing tools to return HTML content and interactive iframes that display directly within chat conversations with configurable security settings. [Commit](https://github.com/open-webui/open-webui/commit/07c5b25bc8b63173f406feb3ba183d375fedee6a), [Commit](https://github.com/open-webui/open-webui/commit/a5d8882bba7933a2c2c31c0a1405aba507c370bb), [Commit](https://github.com/open-webui/open-webui/commit/7be5b7f50f498de97359003609fc5993a172f084), [Commit](https://github.com/open-webui/open-webui/commit/a89ffccd7e96705a4a40e845289f4fcf9c4ae596) +- 📝 Note editor now supports drag-and-drop reordering of list items with visual drag handles, making list organization more intuitive and efficient. [Commit](https://github.com/open-webui/open-webui/commit/e4e97e727e9b4971f1c363b1280ca3a101599d88), [Commit](https://github.com/open-webui/open-webui/commit/aeb5288a3c7a6e9e0a47b807cc52f870c1b7dbe6) +- 🔍 Search modal was enhanced with quick action buttons for starting new conversations and creating notes, with intelligent content pre-population from search queries. 
[Commit](https://github.com/open-webui/open-webui/commit/aa6f63a335e172fec1dc94b2056541f52c1167a6), [Commit](https://github.com/open-webui/open-webui/commit/612a52d7bb7dbe9fa0bbbc8ac0a552d2b9801146), [Commit](https://github.com/open-webui/open-webui/commit/b03529b006f3148e895b1094584e1ab129ecac5b) +- 🛠️ Tool user valve configuration interface was added to the integrations menu, displaying clickable gear icon buttons with tooltips for tools that support user-specific settings, making personal tool configurations easily accessible. [Commit](https://github.com/open-webui/open-webui/commit/27d61307cdce97ed11a05ec13fc300249d6022cd) +- 👥 Channel access control was enhanced to require write permissions for posting, editing, and deleting messages, while read-only users can view content but cannot contribute. [#17543](https://github.com/open-webui/open-webui/pull/17543) +- 💬 Channel models now support image processing, allowing AI assistants to view and analyze images shared in conversation threads. [Commit](https://github.com/open-webui/open-webui/commit/9f0010e234a6f40782a66021435d3c02b9c23639) +- 🌐 Attach Webpage button was added to the message input menu, providing a user-friendly modal interface for attaching web content and YouTube videos as an alternative to the existing URL syntax. [#17534](https://github.com/open-webui/open-webui/pull/17534) +- 🔐 Redis session storage support was added for OAuth redirects, providing better state handling in multi-pod Kubernetes deployments and resolving CSRF mismatch errors. [#17223](https://github.com/open-webui/open-webui/pull/17223), [#15373](https://github.com/open-webui/open-webui/issues/15373) +- 🔍 Ollama Cloud web search integration was added as a new search engine option, providing access to web search functionality through Ollama's cloud infrastructure. 
[Commit](https://github.com/open-webui/open-webui/commit/e06489d92baca095b8f376fbef223298c7772579), [Commit](https://github.com/open-webui/open-webui/commit/4b6d34438bcfc45463dc7a9cb984794b32c1f0a1), [Commit](https://github.com/open-webui/open-webui/commit/05c46008da85357dc6890b846789dfaa59f4a520), [Commit](https://github.com/open-webui/open-webui/commit/fe65fe0b97ec5a8fff71592ff04a25c8e123d108), [Docs:#708](https://github.com/open-webui/docs/pull/708) +- 🔍 Perplexity Websearch API integration was added as a new search engine option, providing access to Perplexity's new websearch functionality. [#17756](https://github.com/open-webui/open-webui/issues/17756), [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/7f411dd5cc1c29733216f79e99eeeed0406a2afe) +- ☁️ OneDrive integration was improved to support separate client IDs for personal and business authentication, enabling both integrations to work simultaneously. [#17619](https://github.com/open-webui/open-webui/pull/17619), [Docs](https://docs.openwebui.com/tutorials/integrations/onedrive-sharepoint), [Docs](https://docs.openwebui.com/getting-started/env-configuration/#onedrive) +- 📝 Pending user overlay content now supports markdown formatting, enabling rich text display for custom messages similar to banner functionality. [#17681](https://github.com/open-webui/open-webui/pull/17681) +- 🎨 Image generation model selection was centralized to enable dynamic model override in function calls, allowing pipes and tools to specify different models than the global default while maintaining backward compatibility. [#17689](https://github.com/open-webui/open-webui/pull/17689) +- 🎨 Interface design was modernized with updated visual styling, improved spacing, and refined component layouts across modals, sidebar, settings, and navigation elements. 
[Commit](https://github.com/open-webui/open-webui/commit/27a91cc80a24bda0a3a188bc3120a8ab57b00881), [Commit](https://github.com/open-webui/open-webui/commit/4ad743098615f9c58daa9df392f31109aeceeb16), [Commit](https://github.com/open-webui/open-webui/commit/fd7385c3921eb59af76a26f4c475aedb38ce2406) +- 📊 Notes query performance was optimized through database-level filtering and separated access control logic, reducing memory usage and eliminating N+1 query problems for better scalability. [#17607](https://github.com/open-webui/open-webui/pull/17607) [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/da661756fa7eec754270e6dd8c67cbf74a28a17f) +- ⚡ Page loading performance was optimized by deferring API requests until components are actually opened, including ChangelogModal, ModelSelector, RecursiveFolder, ArchivedChatsModal, and SearchModal. [#17542](https://github.com/open-webui/open-webui/pull/17542), [#17555](https://github.com/open-webui/open-webui/pull/17555), [#17557](https://github.com/open-webui/open-webui/pull/17557), [#17541](https://github.com/open-webui/open-webui/pull/17541), [#17640](https://github.com/open-webui/open-webui/pull/17640) +- ⚡ Bundle size was reduced by 1.58MB through optimized highlight.js language support, improving page loading speed and reducing bandwidth usage. [#17645](https://github.com/open-webui/open-webui/pull/17645) +- ⚡ Editor collaboration functionality was refactored to reduce package size by 390KB and minimize compilation errors, improving build performance and reliability. [#17593](https://github.com/open-webui/open-webui/pull/17593) +- ♿ Enhanced user interface accessibility through the addition of unique element IDs, improving targeting for testing, styling, and assistive technologies while providing better semantic markup for screen readers and accessibility tools. 
[#17746](https://github.com/open-webui/open-webui/pull/17746) +- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security. +- 🌐 Translations for Portuguese (Brazil), Chinese (Simplified and Traditional), Korean, Irish, Spanish, Finnish, French, Kabyle, Russian, and Catalan were enhanced and improved. + +### Fixed + +- 🛡️ SVG content security was enhanced by implementing DOMPurify sanitization to prevent XSS attacks through malicious SVG elements, ensuring safe rendering of user-generated SVG content. [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/750a659a9fee7687e667d9d755e17b8a0c77d557) +- ☁️ OneDrive attachment menu rendering issues were resolved by restructuring the submenu interface from dropdown to tabbed navigation, preventing menu items from being hidden or clipped due to overflow constraints. [#17554](https://github.com/open-webui/open-webui/issues/17554), [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/90e4b49b881b644465831cc3028bb44f0f7a2196) +- 💬 Attached conversation references now persist throughout the entire chat session, ensuring models can continue querying referenced conversations after multiple conversation turns. [#17750](https://github.com/open-webui/open-webui/issues/17750) +- 🔍 Search modal text box focus issues after pinning or unpinning chats were resolved, allowing users to properly exit the search interface by clicking outside the text box. [#17743](https://github.com/open-webui/open-webui/issues/17743) +- 🔍 Search function chat list is now properly updated in real-time when chats are created or deleted, eliminating stale search results and preview loading failures. [#17741](https://github.com/open-webui/open-webui/issues/17741) +- 💬 Chat jitter and delayed code block expansion in multi-model sessions were resolved by reverting dynamic CodeEditor loading, restoring stable rendering behavior. 
[#17715](https://github.com/open-webui/open-webui/pull/17715), [#17684](https://github.com/open-webui/open-webui/issues/17684) +- 📎 File upload handling was improved to properly recognize uploaded files even when no accompanying text message is provided, resolving issues where attachments were ignored in custom prompts. [#17492](https://github.com/open-webui/open-webui/issues/17492) +- 💬 Chat conversation referencing within projects was restored by including foldered chats in the reference menu, allowing users to properly quote conversations from within their project scope. [#17530](https://github.com/open-webui/open-webui/issues/17530) +- 🔍 RAG query generation is now skipped when all attached files are set to full context mode, preventing unnecessary retrieval operations and improving system efficiency. [#17744](https://github.com/open-webui/open-webui/pull/17744) +- 💾 Memory leaks in file handling and HTTP connections are prevented through proper resource cleanup, ensuring stable memory usage during large file downloads and processing operations. [#17608](https://github.com/open-webui/open-webui/pull/17608) +- 🔐 OAuth access token refresh errors are resolved by properly implementing async/await patterns, preventing "coroutine object has no attribute get" failures during token expiry. [#17585](https://github.com/open-webui/open-webui/issues/17585), [#17678](https://github.com/open-webui/open-webui/issues/17678) +- ⚙️ Valve behavior was improved to properly handle default values and array types, ensuring only explicitly set values are persisted while maintaining consistent distinction between custom and default valve states. [#17664](https://github.com/open-webui/open-webui/pull/17664) +- 🔍 Hybrid search functionality was enhanced to handle inconsistent parameter types and prevent failures when collection results are None, empty, or in unexpected formats. 
[#17617](https://github.com/open-webui/open-webui/pull/17617) +- 📁 Empty folder deletion is now allowed regardless of chat deletion permission restrictions, resolving cases where users couldn't remove folders after deleting all contained chats. [#17683](https://github.com/open-webui/open-webui/pull/17683) +- 📝 Rich text editor console errors were resolved by adding proper error handling when the TipTap editor view is not available or not yet mounted. [#17697](https://github.com/open-webui/open-webui/issues/17697) +- 🗒️ Hidden models are now properly excluded from the notes section dropdown and default model selection, preventing users from accessing models they shouldn't see. [#17722](https://github.com/open-webui/open-webui/pull/17722) +- 🖼️ AI-generated image download filenames now use a clean, translatable "Generated Image" format instead of potentially problematic response text, improving file management and compatibility. [#17721](https://github.com/open-webui/open-webui/pull/17721) +- 🎨 Toggle switch display issues in the Integrations interface are fixed, preventing background highlighting and obscuring on hover. [#17564](https://github.com/open-webui/open-webui/issues/17564) + +### Changed + +- 👥 Channel permissions now require write access for message posting, editing, and deletion, with existing user groups defaulting to read-only access requiring manual admin migration to write permissions for full participation. +- ☁️ OneDrive environment variable configuration was updated to use separate ONEDRIVE_CLIENT_ID_PERSONAL and ONEDRIVE_CLIENT_ID_BUSINESS variables for better client ID separation, while maintaining backward compatibility with the legacy ONEDRIVE_CLIENT_ID variable. 
[Docs](https://docs.openwebui.com/tutorials/integrations/onedrive-sharepoint), [Docs](https://docs.openwebui.com/getting-started/env-configuration/#onedrive) + ## [0.6.30] - 2025-09-17 ### Added diff --git a/LICENSE_NOTICE b/LICENSE_NOTICE new file mode 100644 index 0000000000..4e00d46d9a --- /dev/null +++ b/LICENSE_NOTICE @@ -0,0 +1,11 @@ +# Open WebUI Multi-License Notice + +This repository contains code governed by multiple licenses based on the date and origin of contribution: + +1. All code committed prior to commit a76068d69cd59568b920dfab85dc573dbbb8f131 is licensed under the MIT License (see LICENSE_HISTORY). + +2. All code committed from commit a76068d69cd59568b920dfab85dc573dbbb8f131 up to and including commit 60d84a3aae9802339705826e9095e272e3c83623 is licensed under the BSD 3-Clause License (see LICENSE_HISTORY). + +3. All code contributed or modified after commit 60d84a3aae9802339705826e9095e272e3c83623 is licensed under the Open WebUI License (see LICENSE). + +For details on which commits are covered by which license, refer to LICENSE_HISTORY. diff --git a/README.md b/README.md index 9b01496d9f..49c0a8d9d3 100644 --- a/README.md +++ b/README.md @@ -248,7 +248,7 @@ Discover upcoming features on our roadmap in the [Open WebUI Documentation](http ## License 📜 -This project is licensed under the [Open WebUI License](LICENSE), a revised BSD-3-Clause license. You receive all the same rights as the classic BSD-3 license: you can use, modify, and distribute the software, including in proprietary and commercial products, with minimal restrictions. The only additional requirement is to preserve the "Open WebUI" branding, as detailed in the LICENSE file. For full terms, see the [LICENSE](LICENSE) document. 📄 +This project contains code under multiple licenses. 
The current codebase includes components licensed under the Open WebUI License with an additional requirement to preserve the "Open WebUI" branding, as well as prior contributions under their respective original licenses. For a detailed record of license changes and the applicable terms for each section of the code, please refer to [LICENSE_HISTORY](./LICENSE_HISTORY). For complete and updated licensing details, please see the [LICENSE](./LICENSE) and [LICENSE_HISTORY](./LICENSE_HISTORY) files. ## Support 💬 diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index d3ec3b8c3d..7eb43eb96f 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -222,10 +222,11 @@ class PersistentConfig(Generic[T]): class AppConfig: - _state: dict[str, PersistentConfig] _redis: Union[redis.Redis, redis.cluster.RedisCluster] = None _redis_key_prefix: str + _state: dict[str, PersistentConfig] + def __init__( self, redis_url: Optional[str] = None, @@ -233,9 +234,8 @@ class AppConfig: redis_cluster: Optional[bool] = False, redis_key_prefix: str = "open-webui", ): - super().__setattr__("_state", {}) - super().__setattr__("_redis_key_prefix", redis_key_prefix) if redis_url: + super().__setattr__("_redis_key_prefix", redis_key_prefix) super().__setattr__( "_redis", get_redis_connection( @@ -246,6 +246,8 @@ class AppConfig: ), ) + super().__setattr__("_state", {}) + def __setattr__(self, key, value): if isinstance(value, PersistentConfig): self._state[key] = value @@ -2172,6 +2174,8 @@ ENABLE_ONEDRIVE_INTEGRATION = PersistentConfig( "onedrive.enable", os.getenv("ENABLE_ONEDRIVE_INTEGRATION", "False").lower() == "true", ) + + ENABLE_ONEDRIVE_PERSONAL = ( os.environ.get("ENABLE_ONEDRIVE_PERSONAL", "True").lower() == "true" ) @@ -2179,10 +2183,12 @@ ENABLE_ONEDRIVE_BUSINESS = ( os.environ.get("ENABLE_ONEDRIVE_BUSINESS", "True").lower() == "true" ) -ONEDRIVE_CLIENT_ID = PersistentConfig( - "ONEDRIVE_CLIENT_ID", - "onedrive.client_id", - 
os.environ.get("ONEDRIVE_CLIENT_ID", ""), +ONEDRIVE_CLIENT_ID = os.environ.get("ONEDRIVE_CLIENT_ID", "") +ONEDRIVE_CLIENT_ID_PERSONAL = os.environ.get( + "ONEDRIVE_CLIENT_ID_PERSONAL", ONEDRIVE_CLIENT_ID +) +ONEDRIVE_CLIENT_ID_BUSINESS = os.environ.get( + "ONEDRIVE_CLIENT_ID_BUSINESS", ONEDRIVE_CLIENT_ID ) ONEDRIVE_SHAREPOINT_URL = PersistentConfig( @@ -2765,6 +2771,12 @@ WEB_SEARCH_TRUST_ENV = PersistentConfig( ) +OLLAMA_CLOUD_WEB_SEARCH_API_KEY = PersistentConfig( + "OLLAMA_CLOUD_WEB_SEARCH_API_KEY", + "rag.web.search.ollama_cloud_api_key", + os.getenv("OLLAMA_CLOUD_API_KEY", ""), +) + SEARXNG_QUERY_URL = PersistentConfig( "SEARXNG_QUERY_URL", "rag.web.search.searxng_query_url", diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index b4fdc97d82..e02424f969 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -474,6 +474,10 @@ ENABLE_OAUTH_ID_TOKEN_COOKIE = ( os.environ.get("ENABLE_OAUTH_ID_TOKEN_COOKIE", "True").lower() == "true" ) +OAUTH_CLIENT_INFO_ENCRYPTION_KEY = os.environ.get( + "OAUTH_CLIENT_INFO_ENCRYPTION_KEY", WEBUI_SECRET_KEY +) + OAUTH_SESSION_TOKEN_ENCRYPTION_KEY = os.environ.get( "OAUTH_SESSION_TOKEN_ENCRYPTION_KEY", WEBUI_SECRET_KEY ) @@ -547,16 +551,16 @@ else: CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = os.environ.get( - "CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES", "10" + "CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES", "30" ) if CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES == "": - CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 10 + CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 30 else: try: CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = int(CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES) except Exception: - CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 10 + CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 30 #################################### diff --git a/backend/open_webui/functions.py b/backend/open_webui/functions.py index af6dd1ce1a..d102263cb3 100644 --- a/backend/open_webui/functions.py +++ b/backend/open_webui/functions.py @@ -239,7 +239,7 @@ async def 
generate_function_chat_completion( oauth_token = None try: if request.cookies.get("oauth_session_id", None): - oauth_token = request.app.state.oauth_manager.get_oauth_token( + oauth_token = await request.app.state.oauth_manager.get_oauth_token( user.id, request.cookies.get("oauth_session_id", None), ) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 5630a58839..f38bd47109 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -50,6 +50,11 @@ from starlette.middleware.sessions import SessionMiddleware from starlette.responses import Response, StreamingResponse from starlette.datastructures import Headers +from starsessions import ( + SessionMiddleware as StarSessionsMiddleware, + SessionAutoloadMiddleware, +) +from starsessions.stores.redis import RedisStore from open_webui.utils import logger from open_webui.utils.audit import AuditLevel, AuditLoggingMiddleware @@ -269,6 +274,7 @@ from open_webui.config import ( WEB_SEARCH_CONCURRENT_REQUESTS, WEB_SEARCH_TRUST_ENV, WEB_SEARCH_DOMAIN_FILTER_LIST, + OLLAMA_CLOUD_WEB_SEARCH_API_KEY, JINA_API_KEY, SEARCHAPI_API_KEY, SEARCHAPI_ENGINE, @@ -301,7 +307,8 @@ from open_webui.config import ( GOOGLE_DRIVE_CLIENT_ID, GOOGLE_DRIVE_API_KEY, ENABLE_ONEDRIVE_INTEGRATION, - ONEDRIVE_CLIENT_ID, + ONEDRIVE_CLIENT_ID_PERSONAL, + ONEDRIVE_CLIENT_ID_BUSINESS, ONEDRIVE_SHAREPOINT_URL, ONEDRIVE_SHAREPOINT_TENANT_ID, ENABLE_ONEDRIVE_PERSONAL, @@ -466,7 +473,12 @@ from open_webui.utils.auth import ( get_verified_user, ) from open_webui.utils.plugin import install_tool_and_function_dependencies -from open_webui.utils.oauth import OAuthManager +from open_webui.utils.oauth import ( + OAuthManager, + OAuthClientManager, + decrypt_data, + OAuthClientInformationFull, +) from open_webui.utils.security_headers import SecurityHeadersMiddleware from open_webui.utils.redis import get_redis_connection @@ -596,9 +608,14 @@ app = FastAPI( lifespan=lifespan, ) +# For Open WebUI OIDC/OAuth2 oauth_manager = 
OAuthManager(app) app.state.oauth_manager = oauth_manager +# For Integrations +oauth_client_manager = OAuthClientManager(app) +app.state.oauth_client_manager = oauth_client_manager + app.state.instance_id = None app.state.config = AppConfig( redis_url=REDIS_URL, @@ -882,6 +899,8 @@ app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER = BYPASS_WEB_SEARCH_WEB_LOADER app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = ENABLE_GOOGLE_DRIVE_INTEGRATION app.state.config.ENABLE_ONEDRIVE_INTEGRATION = ENABLE_ONEDRIVE_INTEGRATION + +app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY = OLLAMA_CLOUD_WEB_SEARCH_API_KEY app.state.config.SEARXNG_QUERY_URL = SEARXNG_QUERY_URL app.state.config.YACY_QUERY_URL = YACY_QUERY_URL app.state.config.YACY_USERNAME = YACY_USERNAME @@ -1530,6 +1549,14 @@ async def chat_completion( except: pass + finally: + try: + if mcp_clients := metadata.get("mcp_clients"): + for client in mcp_clients: + await client.disconnect() + except Exception as e: + log.debug(f"Error cleaning up: {e}") + pass if ( metadata.get("session_id") @@ -1743,7 +1770,8 @@ async def get_app_config(request: Request): "api_key": GOOGLE_DRIVE_API_KEY.value, }, "onedrive": { - "client_id": ONEDRIVE_CLIENT_ID.value, + "client_id_personal": ONEDRIVE_CLIENT_ID_PERSONAL, + "client_id_business": ONEDRIVE_CLIENT_ID_BUSINESS, "sharepoint_url": ONEDRIVE_SHAREPOINT_URL.value, "sharepoint_tenant_id": ONEDRIVE_SHAREPOINT_TENANT_ID.value, }, @@ -1863,14 +1891,78 @@ async def get_current_usage(user=Depends(get_verified_user)): # OAuth Login & Callback ############################ + +# Initialize OAuth client manager with any MCP tool servers using OAuth 2.1 +if len(app.state.config.TOOL_SERVER_CONNECTIONS) > 0: + for tool_server_connection in app.state.config.TOOL_SERVER_CONNECTIONS: + if tool_server_connection.get("type", "openapi") == "mcp": + server_id = tool_server_connection.get("info", {}).get("id") + auth_type = tool_server_connection.get("auth_type", "none") + if server_id and auth_type == 
"oauth_2.1": + oauth_client_info = tool_server_connection.get("info", {}).get( + "oauth_client_info", "" + ) + + oauth_client_info = decrypt_data(oauth_client_info) + app.state.oauth_client_manager.add_client( + f"mcp:{server_id}", OAuthClientInformationFull(**oauth_client_info) + ) + + # SessionMiddleware is used by authlib for oauth if len(OAUTH_PROVIDERS) > 0: - app.add_middleware( - SessionMiddleware, - secret_key=WEBUI_SECRET_KEY, - session_cookie="oui-session", - same_site=WEBUI_SESSION_COOKIE_SAME_SITE, - https_only=WEBUI_SESSION_COOKIE_SECURE, + try: + if REDIS_URL: + redis_session_store = RedisStore( + url=REDIS_URL, + prefix=( + f"{REDIS_KEY_PREFIX}:session:" if REDIS_KEY_PREFIX else "session:" + ), + ) + + app.add_middleware(SessionAutoloadMiddleware) + app.add_middleware( + StarSessionsMiddleware, + store=redis_session_store, + cookie_name="oui-session", + cookie_same_site=WEBUI_SESSION_COOKIE_SAME_SITE, + cookie_https_only=WEBUI_SESSION_COOKIE_SECURE, + ) + log.info("Using Redis for session") + else: + raise ValueError("No Redis URL provided") + except Exception as e: + app.add_middleware( + SessionMiddleware, + secret_key=WEBUI_SECRET_KEY, + session_cookie="oui-session", + same_site=WEBUI_SESSION_COOKIE_SAME_SITE, + https_only=WEBUI_SESSION_COOKIE_SECURE, + ) + + +@app.get("/oauth/clients/{client_id}/authorize") +async def oauth_client_authorize( + client_id: str, + request: Request, + response: Response, + user=Depends(get_verified_user), +): + return await oauth_client_manager.handle_authorize(request, client_id=client_id) + + +@app.get("/oauth/clients/{client_id}/callback") +async def oauth_client_callback( + client_id: str, + request: Request, + response: Response, + user=Depends(get_verified_user), +): + return await oauth_client_manager.handle_callback( + request, + client_id=client_id, + user_id=user.id if user else None, + response=response, ) @@ -1885,8 +1977,9 @@ async def oauth_login(provider: str, request: Request): # - This is considered 
insecure in general, as OAuth providers do not always verify email addresses # 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user # - Email addresses are considered unique, so we fail registration if the email address is already taken -@app.get("/oauth/{provider}/callback") -async def oauth_callback(provider: str, request: Request, response: Response): +@app.get("/oauth/{provider}/login/callback") +@app.get("/oauth/{provider}/callback") # Legacy endpoint +async def oauth_login_callback(provider: str, request: Request, response: Response): return await oauth_manager.handle_callback(request, provider, response) diff --git a/backend/open_webui/models/channels.py b/backend/open_webui/models/channels.py index 92f238c3a0..e75266be78 100644 --- a/backend/open_webui/models/channels.py +++ b/backend/open_webui/models/channels.py @@ -57,6 +57,10 @@ class ChannelModel(BaseModel): #################### +class ChannelResponse(ChannelModel): + write_access: bool = False + + class ChannelForm(BaseModel): name: str description: Optional[str] = None diff --git a/backend/open_webui/models/chats.py b/backend/open_webui/models/chats.py index cadb5a3a79..97fd9b6256 100644 --- a/backend/open_webui/models/chats.py +++ b/backend/open_webui/models/chats.py @@ -492,11 +492,16 @@ class ChatTable: self, user_id: str, include_archived: bool = False, + include_folders: bool = False, skip: Optional[int] = None, limit: Optional[int] = None, ) -> list[ChatTitleIdResponse]: with get_db() as db: - query = db.query(Chat).filter_by(user_id=user_id).filter_by(folder_id=None) + query = db.query(Chat).filter_by(user_id=user_id) + + if not include_folders: + query = query.filter_by(folder_id=None) + query = query.filter(or_(Chat.pinned == False, Chat.pinned == None)) if not include_archived: @@ -943,6 +948,16 @@ class ChatTable: return count + def count_chats_by_folder_id_and_user_id(self, folder_id: str, user_id: str) -> int: + with get_db() as db: + query = 
db.query(Chat).filter_by(user_id=user_id) + + query = query.filter_by(folder_id=folder_id) + count = query.count() + + log.info(f"Count of chats for folder '{folder_id}': {count}") + return count + def delete_tag_by_id_and_user_id_and_tag_name( self, id: str, user_id: str, tag_name: str ) -> bool: diff --git a/backend/open_webui/models/files.py b/backend/open_webui/models/files.py index 57978225d4..bf07b5f86f 100644 --- a/backend/open_webui/models/files.py +++ b/backend/open_webui/models/files.py @@ -130,6 +130,17 @@ class FilesTable: except Exception: return None + def get_file_by_id_and_user_id(self, id: str, user_id: str) -> Optional[FileModel]: + with get_db() as db: + try: + file = db.query(File).filter_by(id=id, user_id=user_id).first() + if file: + return FileModel.model_validate(file) + else: + return None + except Exception: + return None + def get_file_metadata_by_id(self, id: str) -> Optional[FileMetadataResponse]: with get_db() as db: try: diff --git a/backend/open_webui/models/notes.py b/backend/open_webui/models/notes.py index b61e820eae..f1b11f071e 100644 --- a/backend/open_webui/models/notes.py +++ b/backend/open_webui/models/notes.py @@ -2,6 +2,7 @@ import json import time import uuid from typing import Optional +from functools import lru_cache from open_webui.internal.db import Base, get_db from open_webui.models.groups import Groups @@ -110,20 +111,72 @@ class NoteTable: return [NoteModel.model_validate(note) for note in notes] def get_notes_by_user_id( + self, + user_id: str, + skip: Optional[int] = None, + limit: Optional[int] = None, + ) -> list[NoteModel]: + with get_db() as db: + query = db.query(Note).filter(Note.user_id == user_id) + query = query.order_by(Note.updated_at.desc()) + + if skip is not None: + query = query.offset(skip) + if limit is not None: + query = query.limit(limit) + + notes = query.all() + return [NoteModel.model_validate(note) for note in notes] + + def get_notes_by_permission( self, user_id: str, permission: str = 
"write", skip: Optional[int] = None, limit: Optional[int] = None, ) -> list[NoteModel]: - notes = self.get_notes(skip=skip, limit=limit) - user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)} - return [ - note - for note in notes - if note.user_id == user_id - or has_access(user_id, permission, note.access_control, user_group_ids) - ] + with get_db() as db: + user_groups = Groups.get_groups_by_member_id(user_id) + user_group_ids = {group.id for group in user_groups} + + # Order newest-first. We stream to keep memory usage low. + query = ( + db.query(Note) + .order_by(Note.updated_at.desc()) + .execution_options(stream_results=True) + .yield_per(256) + ) + + results: list[NoteModel] = [] + n_skipped = 0 + + for note in query: + # Fast-pass #1: owner + if note.user_id == user_id: + permitted = True + # Fast-pass #2: public/open + elif note.access_control is None: + # Technically this should mean public access for both read and write, but we'll only do read for now + # We might want to change this behavior later + permitted = permission == "read" + else: + permitted = has_access( + user_id, permission, note.access_control, user_group_ids + ) + + if not permitted: + continue + + # Apply skip AFTER permission filtering so it counts only accessible notes + if skip and n_skipped < skip: + n_skipped += 1 + continue + + results.append(NoteModel.model_validate(note)) + if limit is not None and len(results) >= limit: + break + + return results def get_note_by_id(self, id: str) -> Optional[NoteModel]: with get_db() as db: diff --git a/backend/open_webui/models/oauth_sessions.py b/backend/open_webui/models/oauth_sessions.py index 9fd5335ce5..81ce220384 100644 --- a/backend/open_webui/models/oauth_sessions.py +++ b/backend/open_webui/models/oauth_sessions.py @@ -176,6 +176,26 @@ class OAuthSessionTable: log.error(f"Error getting OAuth session by ID: {e}") return None + def get_session_by_provider_and_user_id( + self, provider: str, user_id: str + ) -> 
Optional[OAuthSessionModel]: + """Get OAuth session by provider and user ID""" + try: + with get_db() as db: + session = ( + db.query(OAuthSession) + .filter_by(provider=provider, user_id=user_id) + .first() + ) + if session: + session.token = self._decrypt_token(session.token) + return OAuthSessionModel.model_validate(session) + + return None + except Exception as e: + log.error(f"Error getting OAuth session by provider and user ID: {e}") + return None + def get_sessions_by_user_id(self, user_id: str) -> List[OAuthSessionModel]: """Get all OAuth sessions for a user""" try: diff --git a/backend/open_webui/models/tools.py b/backend/open_webui/models/tools.py index 3a47fa008d..48f84b3ac4 100644 --- a/backend/open_webui/models/tools.py +++ b/backend/open_webui/models/tools.py @@ -95,6 +95,8 @@ class ToolResponse(BaseModel): class ToolUserResponse(ToolResponse): user: Optional[UserResponse] = None + model_config = ConfigDict(extra="allow") + class ToolForm(BaseModel): id: str diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index aec8de6846..65da1592e1 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -127,7 +127,13 @@ def query_doc_with_hybrid_search( hybrid_bm25_weight: float, ) -> dict: try: - if not collection_result.documents[0]: + if ( + not collection_result + or not hasattr(collection_result, "documents") + or not collection_result.documents + or len(collection_result.documents) == 0 + or not collection_result.documents[0] + ): log.warning(f"query_doc_with_hybrid_search:no_docs {collection_name}") return {"documents": [], "metadatas": [], "distances": []} diff --git a/backend/open_webui/retrieval/web/ollama.py b/backend/open_webui/retrieval/web/ollama.py new file mode 100644 index 0000000000..a199a14389 --- /dev/null +++ b/backend/open_webui/retrieval/web/ollama.py @@ -0,0 +1,51 @@ +import logging +from dataclasses import dataclass +from typing import Optional + +import 
requests +from open_webui.env import SRC_LOG_LEVELS +from open_webui.retrieval.web.main import SearchResult + +log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["RAG"]) + + +def search_ollama_cloud( + url: str, + api_key: str, + query: str, + count: int, + filter_list: Optional[list[str]] = None, +) -> list[SearchResult]: + """Search using Ollama Search API and return the results as a list of SearchResult objects. + + Args: + api_key (str): A Ollama Search API key + query (str): The query to search for + count (int): Number of results to return + filter_list (Optional[list[str]]): List of domains to filter results by + """ + log.info(f"Searching with Ollama for query: {query}") + + headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"} + payload = {"query": query, "max_results": count} + + try: + response = requests.post(f"{url}/api/web_search", headers=headers, json=payload) + response.raise_for_status() + data = response.json() + + results = data.get("results", []) + log.info(f"Found {len(results)} results") + + return [ + SearchResult( + link=result.get("url", ""), + title=result.get("title", ""), + snippet=result.get("content", ""), + ) + for result in results + ] + except Exception as e: + log.error(f"Error searching Ollama: {e}") + return [] diff --git a/backend/open_webui/retrieval/web/perplexity_search.py b/backend/open_webui/retrieval/web/perplexity_search.py new file mode 100644 index 0000000000..e3e0caa2b3 --- /dev/null +++ b/backend/open_webui/retrieval/web/perplexity_search.py @@ -0,0 +1,64 @@ +import logging +from typing import Optional, Literal +import requests + +from open_webui.retrieval.web.main import SearchResult, get_filtered_results +from open_webui.env import SRC_LOG_LEVELS + + +log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["RAG"]) + + +def search_perplexity_search( + api_key: str, + query: str, + count: int, + filter_list: Optional[list[str]] = None, +) -> list[SearchResult]: + 
"""Search using Perplexity API and return the results as a list of SearchResult objects. + + Args: + api_key (str): A Perplexity API key + query (str): The query to search for + count (int): Maximum number of results to return + filter_list (Optional[list[str]]): List of domains to filter results + + """ + + # Handle PersistentConfig object + if hasattr(api_key, "__str__"): + api_key = str(api_key) + + try: + url = "https://api.perplexity.ai/search" + + # Create payload for the API call + payload = { + "query": query, + "max_results": count, + } + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + # Make the API request + response = requests.request("POST", url, json=payload, headers=headers) + # Parse the JSON response + json_response = response.json() + + # Extract citations from the response + results = json_response.get("results", []) + + return [ + SearchResult( + link=result["url"], title=result["title"], snippet=result["snippet"] + ) + for result in results + ] + + except Exception as e: + log.error(f"Error searching with Perplexity Search API: {e}") + return [] diff --git a/backend/open_webui/routers/channels.py b/backend/open_webui/routers/channels.py index da52be6e79..e7b8366347 100644 --- a/backend/open_webui/routers/channels.py +++ b/backend/open_webui/routers/channels.py @@ -10,7 +10,13 @@ from pydantic import BaseModel from open_webui.socket.main import sio, get_user_ids_from_room from open_webui.models.users import Users, UserNameResponse -from open_webui.models.channels import Channels, ChannelModel, ChannelForm +from open_webui.models.groups import Groups +from open_webui.models.channels import ( + Channels, + ChannelModel, + ChannelForm, + ChannelResponse, +) from open_webui.models.messages import ( Messages, MessageModel, @@ -80,7 +86,7 @@ async def create_new_channel(form_data: ChannelForm, user=Depends(get_admin_user ############################ -@router.get("/{id}", 
response_model=Optional[ChannelModel]) +@router.get("/{id}", response_model=Optional[ChannelResponse]) async def get_channel_by_id(id: str, user=Depends(get_verified_user)): channel = Channels.get_channel_by_id(id) if not channel: @@ -95,7 +101,16 @@ async def get_channel_by_id(id: str, user=Depends(get_verified_user)): status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT() ) - return ChannelModel(**channel.model_dump()) + write_access = has_access( + user.id, type="write", access_control=channel.access_control, strict=False + ) + + return ChannelResponse( + **{ + **channel.model_dump(), + "write_access": write_access or user.role == "admin", + } + ) ############################ @@ -275,6 +290,7 @@ async def model_response_handler(request, channel, message, user): ) thread_history = [] + images = [] message_users = {} for thread_message in thread_messages: @@ -303,6 +319,11 @@ async def model_response_handler(request, channel, message, user): f"{username}: {replace_mentions(thread_message.content)}" ) + thread_message_files = thread_message.data.get("files", []) + for file in thread_message_files: + if file.get("type", "") == "image": + images.append(file.get("url", "")) + system_message = { "role": "system", "content": f"You are {model.get('name', model_id)}, an AI assistant participating in a threaded conversation. Be helpful, concise, and conversational." 
@@ -313,14 +334,29 @@ async def model_response_handler(request, channel, message, user): ), } + content = f"{user.name if user else 'User'}: {message_content}" + if images: + content = [ + { + "type": "text", + "text": content, + }, + *[ + { + "type": "image_url", + "image_url": { + "url": image, + }, + } + for image in images + ], + ] + form_data = { "model": model_id, "messages": [ system_message, - { - "role": "user", - "content": f"{user.name if user else 'User'}: {message_content}", - }, + {"role": "user", "content": content}, ], "stream": False, } @@ -362,7 +398,7 @@ async def new_message_handler( ) if user.role != "admin" and not has_access( - user.id, type="read", access_control=channel.access_control + user.id, type="write", access_control=channel.access_control, strict=False ): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT() @@ -658,7 +694,7 @@ async def add_reaction_to_message( ) if user.role != "admin" and not has_access( - user.id, type="read", access_control=channel.access_control + user.id, type="write", access_control=channel.access_control, strict=False ): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT() @@ -724,7 +760,7 @@ async def remove_reaction_by_id_and_user_id_and_name( ) if user.role != "admin" and not has_access( - user.id, type="read", access_control=channel.access_control + user.id, type="write", access_control=channel.access_control, strict=False ): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT() @@ -806,7 +842,9 @@ async def delete_message_by_id( if ( user.role != "admin" and message.user_id != user.id - and not has_access(user.id, type="read", access_control=channel.access_control) + and not has_access( + user.id, type="write", access_control=channel.access_control, strict=False + ) ): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT() diff --git 
a/backend/open_webui/routers/chats.py b/backend/open_webui/routers/chats.py index 847368412e..788e355f2b 100644 --- a/backend/open_webui/routers/chats.py +++ b/backend/open_webui/routers/chats.py @@ -37,7 +37,9 @@ router = APIRouter() @router.get("/", response_model=list[ChatTitleIdResponse]) @router.get("/list", response_model=list[ChatTitleIdResponse]) def get_session_user_chat_list( - user=Depends(get_verified_user), page: Optional[int] = None + user=Depends(get_verified_user), + page: Optional[int] = None, + include_folders: Optional[bool] = False, ): try: if page is not None: @@ -45,10 +47,12 @@ def get_session_user_chat_list( skip = (page - 1) * limit return Chats.get_chat_title_id_list_by_user_id( - user.id, skip=skip, limit=limit + user.id, include_folders=include_folders, skip=skip, limit=limit ) else: - return Chats.get_chat_title_id_list_by_user_id(user.id) + return Chats.get_chat_title_id_list_by_user_id( + user.id, include_folders=include_folders + ) except Exception as e: log.exception(e) raise HTTPException( diff --git a/backend/open_webui/routers/configs.py b/backend/open_webui/routers/configs.py index 8ce4e0d247..d4b88032e2 100644 --- a/backend/open_webui/routers/configs.py +++ b/backend/open_webui/routers/configs.py @@ -1,5 +1,7 @@ +import logging from fastapi import APIRouter, Depends, Request, HTTPException from pydantic import BaseModel, ConfigDict +import aiohttp from typing import Optional @@ -12,10 +14,24 @@ from open_webui.utils.tools import ( get_tool_server_url, set_tool_servers, ) +from open_webui.utils.mcp.client import MCPClient +from open_webui.env import SRC_LOG_LEVELS + +from open_webui.utils.oauth import ( + get_discovery_urls, + get_oauth_client_info_with_dynamic_client_registration, + encrypt_data, + decrypt_data, + OAuthClientInformationFull, +) +from mcp.shared.auth import OAuthMetadata router = APIRouter() +log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["MAIN"]) + ############################ # ImportConfig @@ 
-79,6 +95,43 @@ async def set_connections_config( } +class OAuthClientRegistrationForm(BaseModel): + url: str + client_id: str + client_name: Optional[str] = None + + +@router.post("/oauth/clients/register") +async def register_oauth_client( + request: Request, + form_data: OAuthClientRegistrationForm, + type: Optional[str] = None, + user=Depends(get_admin_user), +): + try: + oauth_client_id = form_data.client_id + if type: + oauth_client_id = f"{type}:{form_data.client_id}" + + oauth_client_info = ( + await get_oauth_client_info_with_dynamic_client_registration( + request, oauth_client_id, form_data.url + ) + ) + return { + "status": True, + "oauth_client_info": encrypt_data( + oauth_client_info.model_dump(mode="json") + ), + } + except Exception as e: + log.debug(f"Failed to register OAuth client: {e}") + raise HTTPException( + status_code=400, + detail=f"Failed to register OAuth client", + ) + + ############################ # ToolServers Config ############################ @@ -87,6 +140,7 @@ async def set_connections_config( class ToolServerConnection(BaseModel): url: str path: str + type: Optional[str] = "openapi" # openapi, mcp auth_type: Optional[str] key: Optional[str] config: Optional[dict] @@ -114,8 +168,29 @@ async def set_tool_servers_config( request.app.state.config.TOOL_SERVER_CONNECTIONS = [ connection.model_dump() for connection in form_data.TOOL_SERVER_CONNECTIONS ] + await set_tool_servers(request) + for connection in request.app.state.config.TOOL_SERVER_CONNECTIONS: + server_type = connection.get("type", "openapi") + if server_type == "mcp": + server_id = connection.get("info", {}).get("id") + auth_type = connection.get("auth_type", "none") + if auth_type == "oauth_2.1" and server_id: + try: + oauth_client_info = connection.get("info", {}).get( + "oauth_client_info", "" + ) + oauth_client_info = decrypt_data(oauth_client_info) + + await request.app.state.oauth_client_manager.add_client( + f"{server_type}:{server_id}", + 
OAuthClientInformationFull(**oauth_client_info), + ) + except Exception as e: + log.debug(f"Failed to add OAuth client for MCP tool server: {e}") + continue + return { "TOOL_SERVER_CONNECTIONS": request.app.state.config.TOOL_SERVER_CONNECTIONS, } @@ -129,19 +204,105 @@ async def verify_tool_servers_config( Verify the connection to the tool server. """ try: + if form_data.type == "mcp": + if form_data.auth_type == "oauth_2.1": + discovery_urls = get_discovery_urls(form_data.url) + async with aiohttp.ClientSession() as session: + async with session.get( + discovery_urls[0] + ) as oauth_server_metadata_response: + if oauth_server_metadata_response.status != 200: + raise HTTPException( + status_code=400, + detail=f"Failed to fetch OAuth 2.1 discovery document from {discovery_urls[0]}", + ) - token = None - if form_data.auth_type == "bearer": - token = form_data.key - elif form_data.auth_type == "session": - token = request.state.token.credentials + try: + oauth_server_metadata = OAuthMetadata.model_validate( + await oauth_server_metadata_response.json() + ) + return { + "status": True, + "oauth_server_metadata": oauth_server_metadata.model_dump( + mode="json" + ), + } + except Exception as e: + log.info( + f"Failed to parse OAuth 2.1 discovery document: {e}" + ) + raise HTTPException( + status_code=400, + detail=f"Failed to parse OAuth 2.1 discovery document from {discovery_urls[0]}", + ) - url = get_tool_server_url(form_data.url, form_data.path) - return await get_tool_server_data(token, url) + raise HTTPException( + status_code=400, + detail=f"Failed to fetch OAuth 2.1 discovery document from {discovery_urls[0]}", + ) + else: + try: + client = MCPClient() + headers = None + + token = None + if form_data.auth_type == "bearer": + token = form_data.key + elif form_data.auth_type == "session": + token = request.state.token.credentials + elif form_data.auth_type == "system_oauth": + try: + if request.cookies.get("oauth_session_id", None): + token = await 
request.app.state.oauth_manager.get_oauth_token( + user.id, + request.cookies.get("oauth_session_id", None), + ) + except Exception as e: + pass + + if token: + headers = {"Authorization": f"Bearer {token}"} + + await client.connect(form_data.url, headers=headers) + specs = await client.list_tool_specs() + return { + "status": True, + "specs": specs, + } + except Exception as e: + log.debug(f"Failed to create MCP client: {e}") + raise HTTPException( + status_code=400, + detail=f"Failed to create MCP client", + ) + finally: + if client: + await client.disconnect() + else: # openapi + token = None + if form_data.auth_type == "bearer": + token = form_data.key + elif form_data.auth_type == "session": + token = request.state.token.credentials + elif form_data.auth_type == "system_oauth": + try: + if request.cookies.get("oauth_session_id", None): + token = await request.app.state.oauth_manager.get_oauth_token( + user.id, + request.cookies.get("oauth_session_id", None), + ) + except Exception as e: + pass + + url = get_tool_server_url(form_data.url, form_data.path) + return await get_tool_server_data(token, url) + except HTTPException as e: + raise e except Exception as e: + log.debug(f"Failed to connect to the tool server: {e}") raise HTTPException( status_code=400, - detail=f"Failed to connect to the tool server: {str(e)}", + detail=f"Failed to connect to the tool server", ) diff --git a/backend/open_webui/routers/folders.py b/backend/open_webui/routers/folders.py index 36dbfee5c5..ddee71ea4d 100644 --- a/backend/open_webui/routers/folders.py +++ b/backend/open_webui/routers/folders.py @@ -262,15 +262,15 @@ async def update_folder_is_expanded_by_id( async def delete_folder_by_id( request: Request, id: str, user=Depends(get_verified_user) ): - chat_delete_permission = has_permission( - user.id, "chat.delete", request.app.state.config.USER_PERMISSIONS - ) - - if user.role != "admin" and not chat_delete_permission: - raise HTTPException( - 
status_code=status.HTTP_403_FORBIDDEN, - detail=ERROR_MESSAGES.ACCESS_PROHIBITED, + if Chats.count_chats_by_folder_id_and_user_id(id, user.id): + chat_delete_permission = has_permission( + user.id, "chat.delete", request.app.state.config.USER_PERMISSIONS ) + if user.role != "admin" and not chat_delete_permission: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=ERROR_MESSAGES.ACCESS_PROHIBITED, + ) folder = Folders.get_folder_by_id_and_user_id(id, user.id) if folder: diff --git a/backend/open_webui/routers/functions.py b/backend/open_webui/routers/functions.py index 202aa74ca4..c36e656d5f 100644 --- a/backend/open_webui/routers/functions.py +++ b/backend/open_webui/routers/functions.py @@ -431,8 +431,10 @@ async def update_function_valves_by_id( try: form_data = {k: v for k, v in form_data.items() if v is not None} valves = Valves(**form_data) - Functions.update_function_valves_by_id(id, valves.model_dump()) - return valves.model_dump() + + valves_dict = valves.model_dump(exclude_unset=True) + Functions.update_function_valves_by_id(id, valves_dict) + return valves_dict except Exception as e: log.exception(f"Error updating function values by id {id}: {e}") raise HTTPException( @@ -514,10 +516,11 @@ async def update_function_user_valves_by_id( try: form_data = {k: v for k, v in form_data.items() if v is not None} user_valves = UserValves(**form_data) + user_valves_dict = user_valves.model_dump(exclude_unset=True) Functions.update_user_valves_by_id_and_user_id( - id, user.id, user_valves.model_dump() + id, user.id, user_valves_dict ) - return user_valves.model_dump() + return user_valves_dict except Exception as e: log.exception(f"Error updating function user valves by id {id}: {e}") raise HTTPException( diff --git a/backend/open_webui/routers/images.py b/backend/open_webui/routers/images.py index 802a3e9924..059b3a23d7 100644 --- a/backend/open_webui/routers/images.py +++ b/backend/open_webui/routers/images.py @@ -514,6 +514,7 @@ async def 
image_generations( size = form_data.size width, height = tuple(map(int, size.split("x"))) + model = get_image_model(request) r = None try: @@ -531,11 +532,7 @@ async def image_generations( headers["X-OpenWebUI-User-Role"] = user.role data = { - "model": ( - request.app.state.config.IMAGE_GENERATION_MODEL - if request.app.state.config.IMAGE_GENERATION_MODEL != "" - else "dall-e-2" - ), + "model": model, "prompt": form_data.prompt, "n": form_data.n, "size": ( @@ -584,7 +581,6 @@ async def image_generations( headers["Content-Type"] = "application/json" headers["x-goog-api-key"] = request.app.state.config.IMAGES_GEMINI_API_KEY - model = get_image_model(request) data = { "instances": {"prompt": form_data.prompt}, "parameters": { @@ -640,7 +636,7 @@ async def image_generations( } ) res = await comfyui_generate_image( - request.app.state.config.IMAGE_GENERATION_MODEL, + model, form_data, user.id, request.app.state.config.COMFYUI_BASE_URL, diff --git a/backend/open_webui/routers/notes.py b/backend/open_webui/routers/notes.py index dff7bc2e7f..0c420e4f12 100644 --- a/backend/open_webui/routers/notes.py +++ b/backend/open_webui/routers/notes.py @@ -48,7 +48,7 @@ async def get_notes(request: Request, user=Depends(get_verified_user)): "user": UserResponse(**Users.get_user_by_id(note.user_id).model_dump()), } ) - for note in Notes.get_notes_by_user_id(user.id, "write") + for note in Notes.get_notes_by_permission(user.id, "write") ] return notes @@ -81,7 +81,9 @@ async def get_note_list( notes = [ NoteTitleIdResponse(**note.model_dump()) - for note in Notes.get_notes_by_user_id(user.id, "write", skip=skip, limit=limit) + for note in Notes.get_notes_by_permission( + user.id, "write", skip=skip, limit=limit + ) ] return notes diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index 8dadf3523a..bf11ffa0dd 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -1694,25 +1694,27 @@ async def 
download_file_stream( yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n' if done: - file.seek(0) - chunk_size = 1024 * 1024 * 2 - hashed = calculate_sha256(file, chunk_size) - file.seek(0) + file.close() - url = f"{ollama_url}/api/blobs/sha256:{hashed}" - response = requests.post(url, data=file) + with open(file_path, "rb") as file: + chunk_size = 1024 * 1024 * 2 + hashed = calculate_sha256(file, chunk_size) - if response.ok: - res = { - "done": done, - "blob": f"sha256:{hashed}", - "name": file_name, - } - os.remove(file_path) + url = f"{ollama_url}/api/blobs/sha256:{hashed}" + with requests.Session() as session: + response = session.post(url, data=file, timeout=30) - yield f"data: {json.dumps(res)}\n\n" - else: - raise "Ollama: Could not create blob, Please try again." + if response.ok: + res = { + "done": done, + "blob": f"sha256:{hashed}", + "name": file_name, + } + os.remove(file_path) + + yield f"data: {json.dumps(res)}\n\n" + else: + raise "Ollama: Could not create blob, Please try again." 
# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf" diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index 3154be2ee6..e8865b90a0 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -121,7 +121,7 @@ def openai_reasoning_model_handler(payload): return payload -def get_headers_and_cookies( +async def get_headers_and_cookies( request: Request, url, key=None, @@ -174,7 +174,7 @@ def get_headers_and_cookies( oauth_token = None try: if request.cookies.get("oauth_session_id", None): - oauth_token = request.app.state.oauth_manager.get_oauth_token( + oauth_token = await request.app.state.oauth_manager.get_oauth_token( user.id, request.cookies.get("oauth_session_id", None), ) @@ -305,7 +305,7 @@ async def speech(request: Request, user=Depends(get_verified_user)): request.app.state.config.OPENAI_API_CONFIGS.get(url, {}), # Legacy support ) - headers, cookies = get_headers_and_cookies( + headers, cookies = await get_headers_and_cookies( request, url, key, api_config, user=user ) @@ -570,7 +570,7 @@ async def get_models( timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST), ) as session: try: - headers, cookies = get_headers_and_cookies( + headers, cookies = await get_headers_and_cookies( request, url, key, api_config, user=user ) @@ -656,7 +656,7 @@ async def verify_connection( timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST), ) as session: try: - headers, cookies = get_headers_and_cookies( + headers, cookies = await get_headers_and_cookies( request, url, key, api_config, user=user ) @@ -901,7 +901,7 @@ async def generate_chat_completion( convert_logit_bias_input_to_json(payload["logit_bias"]) ) - headers, cookies = get_headers_and_cookies( + headers, cookies = await get_headers_and_cookies( request, url, key, api_config, metadata, user=user ) @@ -1010,7 +1010,9 @@ async def embeddings(request: 
Request, form_data: dict, user): session = None streaming = False - headers, cookies = get_headers_and_cookies(request, url, key, api_config, user=user) + headers, cookies = await get_headers_and_cookies( + request, url, key, api_config, user=user + ) try: session = aiohttp.ClientSession(trust_env=True) r = await session.request( @@ -1080,7 +1082,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)): streaming = False try: - headers, cookies = get_headers_and_cookies( + headers, cookies = await get_headers_and_cookies( request, url, key, api_config, user=user ) diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 0ddf824efa..3681008c87 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -45,6 +45,8 @@ from open_webui.retrieval.loaders.youtube import YoutubeLoader # Web search engines from open_webui.retrieval.web.main import SearchResult from open_webui.retrieval.web.utils import get_web_loader +from open_webui.retrieval.web.ollama import search_ollama_cloud +from open_webui.retrieval.web.perplexity_search import search_perplexity_search from open_webui.retrieval.web.brave import search_brave from open_webui.retrieval.web.kagi import search_kagi from open_webui.retrieval.web.mojeek import search_mojeek @@ -469,6 +471,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "WEB_SEARCH_DOMAIN_FILTER_LIST": request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST, "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL, "BYPASS_WEB_SEARCH_WEB_LOADER": request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER, + "OLLAMA_CLOUD_WEB_SEARCH_API_KEY": request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY, "SEARXNG_QUERY_URL": request.app.state.config.SEARXNG_QUERY_URL, "YACY_QUERY_URL": request.app.state.config.YACY_QUERY_URL, "YACY_USERNAME": 
request.app.state.config.YACY_USERNAME, @@ -525,6 +528,7 @@ class WebConfig(BaseModel): WEB_SEARCH_DOMAIN_FILTER_LIST: Optional[List[str]] = [] BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: Optional[bool] = None BYPASS_WEB_SEARCH_WEB_LOADER: Optional[bool] = None + OLLAMA_CLOUD_WEB_SEARCH_API_KEY: Optional[str] = None SEARXNG_QUERY_URL: Optional[str] = None YACY_QUERY_URL: Optional[str] = None YACY_USERNAME: Optional[str] = None @@ -988,6 +992,9 @@ async def update_rag_config( request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER = ( form_data.web.BYPASS_WEB_SEARCH_WEB_LOADER ) + request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY = ( + form_data.web.OLLAMA_CLOUD_WEB_SEARCH_API_KEY + ) request.app.state.config.SEARXNG_QUERY_URL = form_data.web.SEARXNG_QUERY_URL request.app.state.config.YACY_QUERY_URL = form_data.web.YACY_QUERY_URL request.app.state.config.YACY_USERNAME = form_data.web.YACY_USERNAME @@ -1139,6 +1146,7 @@ async def update_rag_config( "WEB_SEARCH_DOMAIN_FILTER_LIST": request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST, "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL, "BYPASS_WEB_SEARCH_WEB_LOADER": request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER, + "OLLAMA_CLOUD_WEB_SEARCH_API_KEY": request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY, "SEARXNG_QUERY_URL": request.app.state.config.SEARXNG_QUERY_URL, "YACY_QUERY_URL": request.app.state.config.YACY_QUERY_URL, "YACY_USERNAME": request.app.state.config.YACY_USERNAME, @@ -1407,59 +1415,35 @@ def process_file( form_data: ProcessFileForm, user=Depends(get_verified_user), ): - try: + if user.role == "admin": file = Files.get_file_by_id(form_data.file_id) + else: + file = Files.get_file_by_id_and_user_id(form_data.file_id, user.id) - collection_name = form_data.collection_name + if file: + try: - if collection_name is None: - collection_name = f"file-{file.id}" + collection_name = form_data.collection_name - if form_data.content: - 
# Update the content in the file - # Usage: /files/{file_id}/data/content/update, /files/ (audio file upload pipeline) + if collection_name is None: + collection_name = f"file-{file.id}" - try: - # /files/{file_id}/data/content/update - VECTOR_DB_CLIENT.delete_collection(collection_name=f"file-{file.id}") - except: - # Audio file upload pipeline - pass + if form_data.content: + # Update the content in the file + # Usage: /files/{file_id}/data/content/update, /files/ (audio file upload pipeline) - docs = [ - Document( - page_content=form_data.content.replace("
", "\n"), - metadata={ - **file.meta, - "name": file.filename, - "created_by": file.user_id, - "file_id": file.id, - "source": file.filename, - }, - ) - ] - - text_content = form_data.content - elif form_data.collection_name: - # Check if the file has already been processed and save the content - # Usage: /knowledge/{id}/file/add, /knowledge/{id}/file/update - - result = VECTOR_DB_CLIENT.query( - collection_name=f"file-{file.id}", filter={"file_id": file.id} - ) - - if result is not None and len(result.ids[0]) > 0: - docs = [ - Document( - page_content=result.documents[0][idx], - metadata=result.metadatas[0][idx], + try: + # /files/{file_id}/data/content/update + VECTOR_DB_CLIENT.delete_collection( + collection_name=f"file-{file.id}" ) - for idx, id in enumerate(result.ids[0]) - ] - else: + except: + # Audio file upload pipeline + pass + docs = [ Document( - page_content=file.data.get("content", ""), + page_content=form_data.content.replace("
", "\n"), metadata={ **file.meta, "name": file.filename, @@ -1470,149 +1454,190 @@ def process_file( ) ] - text_content = file.data.get("content", "") - else: - # Process the file and save the content - # Usage: /files/ - file_path = file.path - if file_path: - file_path = Storage.get_file(file_path) - loader = Loader( - engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE, - DATALAB_MARKER_API_KEY=request.app.state.config.DATALAB_MARKER_API_KEY, - DATALAB_MARKER_API_BASE_URL=request.app.state.config.DATALAB_MARKER_API_BASE_URL, - DATALAB_MARKER_ADDITIONAL_CONFIG=request.app.state.config.DATALAB_MARKER_ADDITIONAL_CONFIG, - DATALAB_MARKER_SKIP_CACHE=request.app.state.config.DATALAB_MARKER_SKIP_CACHE, - DATALAB_MARKER_FORCE_OCR=request.app.state.config.DATALAB_MARKER_FORCE_OCR, - DATALAB_MARKER_PAGINATE=request.app.state.config.DATALAB_MARKER_PAGINATE, - DATALAB_MARKER_STRIP_EXISTING_OCR=request.app.state.config.DATALAB_MARKER_STRIP_EXISTING_OCR, - DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION=request.app.state.config.DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION, - DATALAB_MARKER_FORMAT_LINES=request.app.state.config.DATALAB_MARKER_FORMAT_LINES, - DATALAB_MARKER_USE_LLM=request.app.state.config.DATALAB_MARKER_USE_LLM, - DATALAB_MARKER_OUTPUT_FORMAT=request.app.state.config.DATALAB_MARKER_OUTPUT_FORMAT, - EXTERNAL_DOCUMENT_LOADER_URL=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_URL, - EXTERNAL_DOCUMENT_LOADER_API_KEY=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY, - TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL, - DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL, - DOCLING_PARAMS={ - "do_ocr": request.app.state.config.DOCLING_DO_OCR, - "force_ocr": request.app.state.config.DOCLING_FORCE_OCR, - "ocr_engine": request.app.state.config.DOCLING_OCR_ENGINE, - "ocr_lang": request.app.state.config.DOCLING_OCR_LANG, - "pdf_backend": request.app.state.config.DOCLING_PDF_BACKEND, - "table_mode": request.app.state.config.DOCLING_TABLE_MODE, - 
"pipeline": request.app.state.config.DOCLING_PIPELINE, - "do_picture_description": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION, - "picture_description_mode": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE, - "picture_description_local": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL, - "picture_description_api": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_API, - }, - PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES, - DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, - DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, - MISTRAL_OCR_API_KEY=request.app.state.config.MISTRAL_OCR_API_KEY, - ) - docs = loader.load( - file.filename, file.meta.get("content_type"), file_path + text_content = form_data.content + elif form_data.collection_name: + # Check if the file has already been processed and save the content + # Usage: /knowledge/{id}/file/add, /knowledge/{id}/file/update + + result = VECTOR_DB_CLIENT.query( + collection_name=f"file-{file.id}", filter={"file_id": file.id} ) - docs = [ - Document( - page_content=doc.page_content, - metadata={ - **doc.metadata, - "name": file.filename, - "created_by": file.user_id, - "file_id": file.id, - "source": file.filename, - }, - ) - for doc in docs - ] - else: - docs = [ - Document( - page_content=file.data.get("content", ""), - metadata={ - **file.meta, - "name": file.filename, - "created_by": file.user_id, - "file_id": file.id, - "source": file.filename, - }, - ) - ] - text_content = " ".join([doc.page_content for doc in docs]) - - log.debug(f"text_content: {text_content}") - Files.update_file_data_by_id( - file.id, - {"content": text_content}, - ) - hash = calculate_sha256_string(text_content) - Files.update_file_hash_by_id(file.id, hash) - - if request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL: - Files.update_file_data_by_id(file.id, {"status": "completed"}) - return { - "status": True, - 
"collection_name": None, - "filename": file.filename, - "content": text_content, - } - else: - try: - result = save_docs_to_vector_db( - request, - docs=docs, - collection_name=collection_name, - metadata={ - "file_id": file.id, - "name": file.filename, - "hash": hash, - }, - add=(True if form_data.collection_name else False), - user=user, - ) - log.info(f"added {len(docs)} items to collection {collection_name}") - - if result: - Files.update_file_metadata_by_id( - file.id, - { - "collection_name": collection_name, - }, - ) - - Files.update_file_data_by_id( - file.id, - {"status": "completed"}, - ) - - return { - "status": True, - "collection_name": collection_name, - "filename": file.filename, - "content": text_content, - } + if result is not None and len(result.ids[0]) > 0: + docs = [ + Document( + page_content=result.documents[0][idx], + metadata=result.metadatas[0][idx], + ) + for idx, id in enumerate(result.ids[0]) + ] else: - raise Exception("Error saving document to vector database") - except Exception as e: - raise e + docs = [ + Document( + page_content=file.data.get("content", ""), + metadata={ + **file.meta, + "name": file.filename, + "created_by": file.user_id, + "file_id": file.id, + "source": file.filename, + }, + ) + ] - except Exception as e: - log.exception(e) - if "No pandoc was found" in str(e): - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED, + text_content = file.data.get("content", "") + else: + # Process the file and save the content + # Usage: /files/ + file_path = file.path + if file_path: + file_path = Storage.get_file(file_path) + loader = Loader( + engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE, + DATALAB_MARKER_API_KEY=request.app.state.config.DATALAB_MARKER_API_KEY, + DATALAB_MARKER_API_BASE_URL=request.app.state.config.DATALAB_MARKER_API_BASE_URL, + DATALAB_MARKER_ADDITIONAL_CONFIG=request.app.state.config.DATALAB_MARKER_ADDITIONAL_CONFIG, + 
DATALAB_MARKER_SKIP_CACHE=request.app.state.config.DATALAB_MARKER_SKIP_CACHE, + DATALAB_MARKER_FORCE_OCR=request.app.state.config.DATALAB_MARKER_FORCE_OCR, + DATALAB_MARKER_PAGINATE=request.app.state.config.DATALAB_MARKER_PAGINATE, + DATALAB_MARKER_STRIP_EXISTING_OCR=request.app.state.config.DATALAB_MARKER_STRIP_EXISTING_OCR, + DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION=request.app.state.config.DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION, + DATALAB_MARKER_FORMAT_LINES=request.app.state.config.DATALAB_MARKER_FORMAT_LINES, + DATALAB_MARKER_USE_LLM=request.app.state.config.DATALAB_MARKER_USE_LLM, + DATALAB_MARKER_OUTPUT_FORMAT=request.app.state.config.DATALAB_MARKER_OUTPUT_FORMAT, + EXTERNAL_DOCUMENT_LOADER_URL=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_URL, + EXTERNAL_DOCUMENT_LOADER_API_KEY=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY, + TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL, + DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL, + DOCLING_PARAMS={ + "do_ocr": request.app.state.config.DOCLING_DO_OCR, + "force_ocr": request.app.state.config.DOCLING_FORCE_OCR, + "ocr_engine": request.app.state.config.DOCLING_OCR_ENGINE, + "ocr_lang": request.app.state.config.DOCLING_OCR_LANG, + "pdf_backend": request.app.state.config.DOCLING_PDF_BACKEND, + "table_mode": request.app.state.config.DOCLING_TABLE_MODE, + "pipeline": request.app.state.config.DOCLING_PIPELINE, + "do_picture_description": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION, + "picture_description_mode": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE, + "picture_description_local": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL, + "picture_description_api": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_API, + }, + PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES, + DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, + 
DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, + MISTRAL_OCR_API_KEY=request.app.state.config.MISTRAL_OCR_API_KEY, + ) + docs = loader.load( + file.filename, file.meta.get("content_type"), file_path + ) + + docs = [ + Document( + page_content=doc.page_content, + metadata={ + **doc.metadata, + "name": file.filename, + "created_by": file.user_id, + "file_id": file.id, + "source": file.filename, + }, + ) + for doc in docs + ] + else: + docs = [ + Document( + page_content=file.data.get("content", ""), + metadata={ + **file.meta, + "name": file.filename, + "created_by": file.user_id, + "file_id": file.id, + "source": file.filename, + }, + ) + ] + text_content = " ".join([doc.page_content for doc in docs]) + + log.debug(f"text_content: {text_content}") + Files.update_file_data_by_id( + file.id, + {"content": text_content}, ) - else: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=str(e), + hash = calculate_sha256_string(text_content) + Files.update_file_hash_by_id(file.id, hash) + + if request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL: + Files.update_file_data_by_id(file.id, {"status": "completed"}) + return { + "status": True, + "collection_name": None, + "filename": file.filename, + "content": text_content, + } + else: + try: + result = save_docs_to_vector_db( + request, + docs=docs, + collection_name=collection_name, + metadata={ + "file_id": file.id, + "name": file.filename, + "hash": hash, + }, + add=(True if form_data.collection_name else False), + user=user, + ) + log.info(f"added {len(docs)} items to collection {collection_name}") + + if result: + Files.update_file_metadata_by_id( + file.id, + { + "collection_name": collection_name, + }, + ) + + Files.update_file_data_by_id( + file.id, + {"status": "completed"}, + ) + + return { + "status": True, + "collection_name": collection_name, + "filename": file.filename, + "content": text_content, + } + else: + raise Exception("Error saving document to vector 
database") + except Exception as e: + raise e + + except Exception as e: + log.exception(e) + Files.update_file_data_by_id( + file.id, + {"status": "failed"}, ) + if "No pandoc was found" in str(e): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED, + ) + else: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=str(e), + ) + + else: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND + ) + class ProcessTextForm(BaseModel): name: str @@ -1769,7 +1794,25 @@ def search_web(request: Request, engine: str, query: str) -> list[SearchResult]: """ # TODO: add playwright to search the web - if engine == "searxng": + if engine == "ollama_cloud": + return search_ollama_cloud( + "https://ollama.com", + request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY, + query, + request.app.state.config.WEB_SEARCH_RESULT_COUNT, + request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST, + ) + elif engine == "perplexity_search": + if request.app.state.config.PERPLEXITY_API_KEY: + return search_perplexity_search( + request.app.state.config.PERPLEXITY_API_KEY, + query, + request.app.state.config.WEB_SEARCH_RESULT_COUNT, + request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST, + ) + else: + raise Exception("No PERPLEXITY_API_KEY found in environment variables") + elif engine == "searxng": if request.app.state.config.SEARXNG_QUERY_URL: return search_searxng( request.app.state.config.SEARXNG_QUERY_URL, diff --git a/backend/open_webui/routers/tools.py b/backend/open_webui/routers/tools.py index 5f82e7f1bd..eb66a86825 100644 --- a/backend/open_webui/routers/tools.py +++ b/backend/open_webui/routers/tools.py @@ -9,6 +9,7 @@ from pydantic import BaseModel, HttpUrl from fastapi import APIRouter, Depends, HTTPException, Request, status +from open_webui.models.oauth_sessions import OAuthSessions from open_webui.models.tools import ( ToolForm, ToolModel, @@ -41,8 +42,17 @@ 
router = APIRouter() @router.get("/", response_model=list[ToolUserResponse]) async def get_tools(request: Request, user=Depends(get_verified_user)): - tools = Tools.get_tools() + tools = [ + ToolUserResponse( + **{ + **tool.model_dump(), + "has_user_valves": "class UserValves(BaseModel):" in tool.content, + } + ) + for tool in Tools.get_tools() + ] + # OpenAPI Tool Servers for server in await get_tool_servers(request): tools.append( ToolUserResponse( @@ -68,6 +78,50 @@ async def get_tools(request: Request, user=Depends(get_verified_user)): ) ) + # MCP Tool Servers + for server in request.app.state.config.TOOL_SERVER_CONNECTIONS: + if server.get("type", "openapi") == "mcp": + server_id = server.get("info", {}).get("id") + auth_type = server.get("auth_type", "none") + + session_token = None + if auth_type == "oauth_2.1": + splits = server_id.split(":") + server_id = splits[-1] if len(splits) > 1 else server_id + + session_token = ( + await request.app.state.oauth_client_manager.get_oauth_token( + user.id, f"mcp:{server_id}" + ) + ) + + tools.append( + ToolUserResponse( + **{ + "id": f"server:mcp:{server.get('info', {}).get('id')}", + "user_id": f"server:mcp:{server.get('info', {}).get('id')}", + "name": server.get("info", {}).get("name", "MCP Tool Server"), + "meta": { + "description": server.get("info", {}).get( + "description", "" + ), + }, + "access_control": server.get("config", {}).get( + "access_control", None + ), + "updated_at": int(time.time()), + "created_at": int(time.time()), + **( + { + "authenticated": session_token is not None, + } + if auth_type == "oauth_2.1" + else {} + ), + } + ) + ) + if user.role == "admin" and BYPASS_ADMIN_ACCESS_CONTROL: # Admin can see all tools return tools @@ -462,8 +516,9 @@ async def update_tools_valves_by_id( try: form_data = {k: v for k, v in form_data.items() if v is not None} valves = Valves(**form_data) - Tools.update_tool_valves_by_id(id, valves.model_dump()) - return valves.model_dump() + valves_dict = 
valves.model_dump(exclude_unset=True) + Tools.update_tool_valves_by_id(id, valves_dict) + return valves_dict except Exception as e: log.exception(f"Failed to update tool valves by id {id}: {e}") raise HTTPException( @@ -538,10 +593,11 @@ async def update_tools_user_valves_by_id( try: form_data = {k: v for k, v in form_data.items() if v is not None} user_valves = UserValves(**form_data) + user_valves_dict = user_valves.model_dump(exclude_unset=True) Tools.update_user_valves_by_id_and_user_id( - id, user.id, user_valves.model_dump() + id, user.id, user_valves_dict ) - return user_valves.model_dump() + return user_valves_dict except Exception as e: log.exception(f"Failed to update user valves by id {id}: {e}") raise HTTPException( diff --git a/backend/open_webui/utils/access_control.py b/backend/open_webui/utils/access_control.py index 6215a6ac22..af48bebfb4 100644 --- a/backend/open_webui/utils/access_control.py +++ b/backend/open_webui/utils/access_control.py @@ -110,9 +110,13 @@ def has_access( type: str = "write", access_control: Optional[dict] = None, user_group_ids: Optional[Set[str]] = None, + strict: bool = True, ) -> bool: if access_control is None: - return type == "read" + if strict: + return type == "read" + else: + return True if user_group_ids is None: user_groups = Groups.get_groups_by_member_id(user_id) diff --git a/backend/open_webui/utils/files.py b/backend/open_webui/utils/files.py new file mode 100644 index 0000000000..b410cbab50 --- /dev/null +++ b/backend/open_webui/utils/files.py @@ -0,0 +1,97 @@ +from open_webui.routers.images import ( + load_b64_image_data, + upload_image, +) + +from fastapi import ( + APIRouter, + Depends, + HTTPException, + Request, + UploadFile, +) + +from open_webui.routers.files import upload_file_handler + +import mimetypes +import base64 +import io + + +def get_image_url_from_base64(request, base64_image_string, metadata, user): + if "data:image/png;base64" in base64_image_string: + image_url = "" + # Extract base64 
image data from the line + image_data, content_type = load_b64_image_data(base64_image_string) + if image_data is not None: + image_url = upload_image( + request, + image_data, + content_type, + metadata, + user, + ) + return image_url + return None + + +def load_b64_audio_data(b64_str): + try: + if "," in b64_str: + header, b64_data = b64_str.split(",", 1) + else: + b64_data = b64_str + header = "data:audio/wav;base64" + audio_data = base64.b64decode(b64_data) + content_type = ( + header.split(";")[0].split(":")[1] if ";" in header else "audio/wav" + ) + return audio_data, content_type + except Exception as e: + print(f"Error decoding base64 audio data: {e}") + return None, None + + +def upload_audio(request, audio_data, content_type, metadata, user): + audio_format = mimetypes.guess_extension(content_type) + file = UploadFile( + file=io.BytesIO(audio_data), + filename=f"generated-{audio_format}", # will be converted to a unique ID on upload_file + headers={ + "content-type": content_type, + }, + ) + file_item = upload_file_handler( + request, + file=file, + metadata=metadata, + process=False, + user=user, + ) + url = request.app.url_path_for("get_file_content_by_id", id=file_item.id) + return url + + +def get_audio_url_from_base64(request, base64_audio_string, metadata, user): + if "data:audio/wav;base64" in base64_audio_string: + audio_url = "" + # Extract base64 audio data from the line + audio_data, content_type = load_b64_audio_data(base64_audio_string) + if audio_data is not None: + audio_url = upload_audio( + request, + audio_data, + content_type, + metadata, + user, + ) + return audio_url + return None + + +def get_file_url_from_base64(request, base64_file_string, metadata, user): + if "data:image/png;base64" in base64_file_string: + return get_image_url_from_base64(request, base64_file_string, metadata, user) + elif "data:audio/wav;base64" in base64_file_string: + return get_audio_url_from_base64(request, base64_file_string, metadata, user) + return None 
diff --git a/backend/open_webui/utils/mcp/client.py b/backend/open_webui/utils/mcp/client.py new file mode 100644 index 0000000000..01df38886c --- /dev/null +++ b/backend/open_webui/utils/mcp/client.py @@ -0,0 +1,110 @@ +import asyncio +from typing import Optional +from contextlib import AsyncExitStack + +from mcp import ClientSession +from mcp.client.auth import OAuthClientProvider, TokenStorage +from mcp.client.streamable_http import streamablehttp_client +from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken + + +class MCPClient: + def __init__(self): + self.session: Optional[ClientSession] = None + self.exit_stack = AsyncExitStack() + + async def connect(self, url: str, headers: Optional[dict] = None): + try: + self._streams_context = streamablehttp_client(url, headers=headers) + + transport = await self.exit_stack.enter_async_context(self._streams_context) + read_stream, write_stream, _ = transport + + self._session_context = ClientSession( + read_stream, write_stream + ) # pylint: disable=W0201 + + self.session = await self.exit_stack.enter_async_context( + self._session_context + ) + await self.session.initialize() + except Exception as e: + await self.disconnect() + raise e + + async def list_tool_specs(self) -> Optional[dict]: + if not self.session: + raise RuntimeError("MCP client is not connected.") + + result = await self.session.list_tools() + tools = result.tools + + tool_specs = [] + for tool in tools: + name = tool.name + description = tool.description + + inputSchema = tool.inputSchema + + # TODO: handle outputSchema if needed + outputSchema = getattr(tool, "outputSchema", None) + + tool_specs.append( + {"name": name, "description": description, "parameters": inputSchema} + ) + + return tool_specs + + async def call_tool( + self, function_name: str, function_args: dict + ) -> Optional[dict]: + if not self.session: + raise RuntimeError("MCP client is not connected.") + + result = await 
self.session.call_tool(function_name, function_args) + if not result: + raise Exception("No result returned from MCP tool call.") + + result_dict = result.model_dump(mode="json") + result_content = result_dict.get("content", {}) + + if result.isError: + raise Exception(result_content) + else: + return result_content + + async def list_resources(self, cursor: Optional[str] = None) -> Optional[dict]: + if not self.session: + raise RuntimeError("MCP client is not connected.") + + result = await self.session.list_resources(cursor=cursor) + if not result: + raise Exception("No result returned from MCP list_resources call.") + + result_dict = result.model_dump() + resources = result_dict.get("resources", []) + + return resources + + async def read_resource(self, uri: str) -> Optional[dict]: + if not self.session: + raise RuntimeError("MCP client is not connected.") + + result = await self.session.read_resource(uri) + if not result: + raise Exception("No result returned from MCP read_resource call.") + result_dict = result.model_dump() + + return result_dict + + async def disconnect(self): + # Clean up and close the session + await self.exit_stack.aclose() + + async def __aenter__(self): + await self.exit_stack.__aenter__() + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + await self.exit_stack.__aexit__(exc_type, exc_value, traceback) + await self.disconnect() diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 97f19dcded..ff8c215607 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -20,9 +20,11 @@ from concurrent.futures import ThreadPoolExecutor from fastapi import Request, HTTPException +from fastapi.responses import HTMLResponse from starlette.responses import Response, StreamingResponse, JSONResponse +from open_webui.models.oauth_sessions import OAuthSessions from open_webui.models.chats import Chats from open_webui.models.folders import 
Folders from open_webui.models.users import Users @@ -52,6 +54,11 @@ from open_webui.routers.pipelines import ( from open_webui.routers.memories import query_memory, QueryMemoryForm from open_webui.utils.webhook import post_webhook +from open_webui.utils.files import ( + get_audio_url_from_base64, + get_file_url_from_base64, + get_image_url_from_base64, +) from open_webui.models.users import UserModel @@ -86,6 +93,7 @@ from open_webui.utils.filter import ( ) from open_webui.utils.code_interpreter import execute_code_jupyter from open_webui.utils.payload import apply_system_prompt_to_body +from open_webui.utils.mcp.client import MCPClient from open_webui.config import ( @@ -144,12 +152,14 @@ async def chat_completion_tools_handler( def get_tools_function_calling_payload(messages, task_model_id, content): user_message = get_last_user_message(messages) - history = "\n".join( + + recent_messages = messages[-4:] if len(messages) > 4 else messages + chat_history = "\n".join( f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\"" - for message in messages[::-1][:4] + for message in recent_messages ) - prompt = f"History:\n{history}\nQuery: {user_message}" + prompt = f"History:\n{chat_history}\nQuery: {user_message}" return { "model": task_model_id, @@ -631,48 +641,53 @@ async def chat_completion_files_handler( sources = [] if files := body.get("metadata", {}).get("files", None): + # Check if all files are in full context mode + all_full_context = all(item.get("context") == "full" for item in files) + queries = [] - try: - queries_response = await generate_queries( - request, - { - "model": body["model"], - "messages": body["messages"], - "type": "retrieval", - }, - user, - ) - queries_response = queries_response["choices"][0]["message"]["content"] - + if not all_full_context: try: - bracket_start = queries_response.find("{") - bracket_end = queries_response.rfind("}") + 1 + queries_response = await generate_queries( + request, + { + "model": body["model"], + 
"messages": body["messages"], + "type": "retrieval", + }, + user, + ) + queries_response = queries_response["choices"][0]["message"]["content"] - if bracket_start == -1 or bracket_end == -1: - raise Exception("No JSON object found in the response") + try: + bracket_start = queries_response.find("{") + bracket_end = queries_response.rfind("}") + 1 - queries_response = queries_response[bracket_start:bracket_end] - queries_response = json.loads(queries_response) - except Exception as e: - queries_response = {"queries": [queries_response]} + if bracket_start == -1 or bracket_end == -1: + raise Exception("No JSON object found in the response") - queries = queries_response.get("queries", []) - except: - pass + queries_response = queries_response[bracket_start:bracket_end] + queries_response = json.loads(queries_response) + except Exception as e: + queries_response = {"queries": [queries_response]} + + queries = queries_response.get("queries", []) + except: + pass if len(queries) == 0: queries = [get_last_user_message(body["messages"])] - await __event_emitter__( - { - "type": "status", - "data": { - "action": "queries_generated", - "queries": queries, - "done": False, - }, - } - ) + if not all_full_context: + await __event_emitter__( + { + "type": "status", + "data": { + "action": "queries_generated", + "queries": queries, + "done": False, + }, + } + ) try: # Offload get_sources_from_items to a separate thread @@ -701,7 +716,8 @@ async def chat_completion_files_handler( r=request.app.state.config.RELEVANCE_THRESHOLD, hybrid_bm25_weight=request.app.state.config.HYBRID_BM25_WEIGHT, hybrid_search=request.app.state.config.ENABLE_RAG_HYBRID_SEARCH, - full_context=request.app.state.config.RAG_FULL_CONTEXT, + full_context=all_full_context + or request.app.state.config.RAG_FULL_CONTEXT, user=user, ), ) @@ -818,7 +834,7 @@ async def process_chat_payload(request, form_data, user, metadata, model): oauth_token = None try: if request.cookies.get("oauth_session_id", None): - 
oauth_token = request.app.state.oauth_manager.get_oauth_token( + oauth_token = await request.app.state.oauth_manager.get_oauth_token( user.id, request.cookies.get("oauth_session_id", None), ) @@ -987,14 +1003,107 @@ async def process_chat_payload(request, form_data, user, metadata, model): # Server side tools tool_ids = metadata.get("tool_ids", None) # Client side tools - tool_servers = metadata.get("tool_servers", None) + direct_tool_servers = metadata.get("tool_servers", None) log.debug(f"{tool_ids=}") - log.debug(f"{tool_servers=}") + log.debug(f"{direct_tool_servers=}") tools_dict = {} + mcp_clients = [] + mcp_tools_dict = {} + if tool_ids: + for tool_id in tool_ids: + if tool_id.startswith("server:mcp:"): + try: + server_id = tool_id[len("server:mcp:") :] + + mcp_server_connection = None + for ( + server_connection + ) in request.app.state.config.TOOL_SERVER_CONNECTIONS: + if ( + server_connection.get("type", "") == "mcp" + and server_connection.get("info", {}).get("id") == server_id + ): + mcp_server_connection = server_connection + break + + if not mcp_server_connection: + log.error(f"MCP server with id {server_id} not found") + continue + + auth_type = mcp_server_connection.get("auth_type", "") + + headers = {} + if auth_type == "bearer": + headers["Authorization"] = ( + f"Bearer {mcp_server_connection.get('key', '')}" + ) + elif auth_type == "none": + # No authentication + pass + elif auth_type == "session": + headers["Authorization"] = ( + f"Bearer {request.state.token.credentials}" + ) + elif auth_type == "system_oauth": + oauth_token = extra_params.get("__oauth_token__", None) + if oauth_token: + headers["Authorization"] = ( + f"Bearer {oauth_token.get('access_token', '')}" + ) + elif auth_type == "oauth_2.1": + try: + splits = server_id.split(":") + server_id = splits[-1] if len(splits) > 1 else server_id + + oauth_token = await request.app.state.oauth_client_manager.get_oauth_token( + user.id, f"mcp:{server_id}" + ) + + if oauth_token: + 
headers["Authorization"] = ( + f"Bearer {oauth_token.get('access_token', '')}" + ) + except Exception as e: + log.error(f"Error getting OAuth token: {e}") + oauth_token = None + + mcp_client = MCPClient() + await mcp_client.connect( + url=mcp_server_connection.get("url", ""), + headers=headers if headers else None, + ) + + tool_specs = await mcp_client.list_tool_specs() + for tool_spec in tool_specs: + + def make_tool_function(function_name): + async def tool_function(**kwargs): + return await mcp_client.call_tool( + function_name, + function_args=kwargs, + ) + + return tool_function + + tool_function = make_tool_function(tool_spec["name"]) + + mcp_tools_dict[tool_spec["name"]] = { + "spec": tool_spec, + "callable": tool_function, + "type": "mcp", + "client": mcp_client, + "direct": False, + } + + mcp_clients.append(mcp_client) + except Exception as e: + log.debug(e) + continue + tools_dict = await get_tools( request, tool_ids, @@ -1006,9 +1115,11 @@ async def process_chat_payload(request, form_data, user, metadata, model): "__files__": metadata.get("files", []), }, ) + if mcp_tools_dict: + tools_dict = {**tools_dict, **mcp_tools_dict} - if tool_servers: - for tool_server in tool_servers: + if direct_tool_servers: + for tool_server in direct_tool_servers: tool_specs = tool_server.pop("specs", []) for tool in tool_specs: @@ -1018,6 +1129,9 @@ async def process_chat_payload(request, form_data, user, metadata, model): "server": tool_server, } + if mcp_clients: + metadata["mcp_clients"] = mcp_clients + if tools_dict: if metadata.get("params", {}).get("function_calling") == "native": # If the function calling is native, then call the tools function calling handler @@ -1026,6 +1140,7 @@ async def process_chat_payload(request, form_data, user, metadata, model): {"type": "function", "function": tool.get("spec", {})} for tool in tools_dict.values() ] + else: # If the function calling is not native, then call the tools function calling handler try: @@ -1079,26 +1194,15 @@ 
async def process_chat_payload(request, form_data, user, metadata, model): raise Exception("No user message found") if context_string != "": - # Workaround for Ollama 2.0+ system prompt issue - # TODO: replace with add_or_update_system_message - if model.get("owned_by") == "ollama": - form_data["messages"] = prepend_to_first_user_message_content( - rag_template( - request.app.state.config.RAG_TEMPLATE, - context_string, - prompt, - ), - form_data["messages"], - ) - else: - form_data["messages"] = add_or_update_system_message( - rag_template( - request.app.state.config.RAG_TEMPLATE, - context_string, - prompt, - ), - form_data["messages"], - ) + form_data["messages"] = add_or_update_user_message( + rag_template( + request.app.state.config.RAG_TEMPLATE, + context_string, + prompt, + ), + form_data["messages"], + append=False, + ) # If there are citations, add them to the data_items sources = [ @@ -1498,7 +1602,7 @@ async def process_chat_response( oauth_token = None try: if request.cookies.get("oauth_session_id", None): - oauth_token = request.app.state.oauth_manager.get_oauth_token( + oauth_token = await request.app.state.oauth_manager.get_oauth_token( user.id, request.cookies.get("oauth_session_id", None), ) @@ -1581,7 +1685,8 @@ async def process_chat_response( break if tool_result is not None: - tool_calls_display_content = f'{tool_calls_display_content}
\nTool Executed\n
\n' + tool_result_embeds = result.get("embeds", "") + tool_calls_display_content = f'{tool_calls_display_content}
\nTool Executed\n
\n' else: tool_calls_display_content = f'{tool_calls_display_content}
\nExecuting...\n
\n' @@ -2328,6 +2433,8 @@ async def process_chat_response( results = [] for tool_call in response_tool_calls: + + print("tool_call", tool_call) tool_call_id = tool_call.get("id", "") tool_name = tool_call.get("function", {}).get("name", "") tool_args = tool_call.get("function", {}).get("arguments", "{}") @@ -2402,14 +2509,145 @@ async def process_chat_response( except Exception as e: tool_result = str(e) + tool_result_embeds = [] + if isinstance(tool_result, HTMLResponse): + content_disposition = tool_result.headers.get( + "Content-Disposition", "" + ) + if "inline" in content_disposition: + content = tool_result.body.decode("utf-8") + tool_result_embeds.append(content) + + if 200 <= tool_result.status_code < 300: + tool_result = { + "status": "success", + "code": "ui_component", + "message": "Embedded UI result is active and visible to the user.", + } + elif 400 <= tool_result.status_code < 500: + tool_result = { + "status": "error", + "code": "ui_component", + "message": f"Client error {tool_result.status_code} from embedded UI result.", + } + elif 500 <= tool_result.status_code < 600: + tool_result = { + "status": "error", + "code": "ui_component", + "message": f"Server error {tool_result.status_code} from embedded UI result.", + } + else: + tool_result = { + "status": "error", + "code": "ui_component", + "message": f"Unexpected status code {tool_result.status_code} from embedded UI result.", + } + else: + tool_result = tool_result.body.decode("utf-8") + + elif ( + tool.get("type") == "external" + and isinstance(tool_result, tuple) + ) or ( + tool.get("direct", True) + and isinstance(tool_result, list) + and len(tool_result) == 2 + ): + tool_result, tool_response_headers = tool_result + + if tool_response_headers: + content_disposition = tool_response_headers.get( + "Content-Disposition", + tool_response_headers.get( + "content-disposition", "" + ), + ) + + if "inline" in content_disposition: + content_type = tool_response_headers.get( + "Content-Type", + 
tool_response_headers.get("content-type", ""), + ) + location = tool_response_headers.get( + "Location", + tool_response_headers.get("location", ""), + ) + + if "text/html" in content_type: + # Display as iframe embed + tool_result_embeds.append(tool_result) + tool_result = { + "status": "success", + "code": "ui_component", + "message": "Embedded UI result is active and visible to the user.", + } + elif location: + tool_result_embeds.append(location) + tool_result = { + "status": "success", + "code": "ui_component", + "message": "Embedded UI result is active and visible to the user.", + } + tool_result_files = [] if isinstance(tool_result, list): for item in tool_result: # check if string if isinstance(item, str) and item.startswith("data:"): - tool_result_files.append(item) + tool_result_files.append( + { + "type": "data", + "content": item, + } + ) tool_result.remove(item) + if tool.get("type") == "mcp": + if isinstance(item, dict): + if ( + item.get("type") == "image" + or item.get("type") == "audio" + ): + file_url = get_file_url_from_base64( + request, + f"data:{item.get('mimeType')};base64,{item.get('data', item.get('blob', ''))}", + { + "chat_id": metadata.get( + "chat_id", None + ), + "message_id": metadata.get( + "message_id", None + ), + "session_id": metadata.get( + "session_id", None + ), + "result": item, + }, + user, + ) + + tool_result_files.append( + { + "type": item.get("type", "data"), + "url": file_url, + } + ) + tool_result.remove(item) + + if tool_result_files: + if not isinstance(tool_result, list): + tool_result = [ + tool_result, + ] + + for file in tool_result_files: + tool_result.append( + { + "type": file.get("type", "data"), + "content": "Result is being displayed as a file.", + } + ) + if isinstance(tool_result, dict) or isinstance( tool_result, list ): @@ -2426,6 +2664,11 @@ async def process_chat_response( if tool_result_files else {} ), + **( + {"embeds": tool_result_embeds} + if tool_result_embeds + else {} + ), } ) @@ -2571,23 
+2814,18 @@ async def process_chat_response( if isinstance(stdout, str): stdoutLines = stdout.split("\n") for idx, line in enumerate(stdoutLines): + if "data:image/png;base64" in line: - image_url = "" - # Extract base64 image data from the line - image_data, content_type = ( - load_b64_image_data(line) + image_url = get_image_url_from_base64( + request, + line, + metadata, + user, ) - if image_data is not None: - image_url = upload_image( - request, - image_data, - content_type, - metadata, - user, + if image_url: + stdoutLines[idx] = ( + f"![Output Image]({image_url})" ) - stdoutLines[idx] = ( - f"![Output Image]({image_url})" - ) output["stdout"] = "\n".join(stdoutLines) @@ -2597,19 +2835,12 @@ async def process_chat_response( resultLines = result.split("\n") for idx, line in enumerate(resultLines): if "data:image/png;base64" in line: - image_url = "" - # Extract base64 image data from the line - image_data, content_type = ( - load_b64_image_data(line) + image_url = get_image_url_from_base64( + request, + line, + metadata, + user, ) - if image_data is not None: - image_url = upload_image( - request, - image_data, - content_type, - metadata, - user, - ) resultLines[idx] = ( f"![Output Image]({image_url})" ) diff --git a/backend/open_webui/utils/misc.py b/backend/open_webui/utils/misc.py index 370cf26c48..e8cfa0d158 100644 --- a/backend/open_webui/utils/misc.py +++ b/backend/open_webui/utils/misc.py @@ -120,19 +120,20 @@ def pop_system_message(messages: list[dict]) -> tuple[Optional[dict], list[dict] return get_system_message(messages), remove_system_message(messages) -def prepend_to_first_user_message_content( - content: str, messages: list[dict] -) -> list[dict]: - for message in messages: - if message["role"] == "user": - if isinstance(message["content"], list): - for item in message["content"]: - if item["type"] == "text": - item["text"] = f"{content}\n{item['text']}" - else: - message["content"] = f"{content}\n{message['content']}" - break - return messages 
+def update_message_content(message: dict, content: str, append: bool = True) -> dict: + if isinstance(message["content"], list): + for item in message["content"]: + if item["type"] == "text": + if append: + item["text"] = f"{item['text']}\n{content}" + else: + item["text"] = f"{content}\n{item['text']}" + else: + if append: + message["content"] = f"{message['content']}\n{content}" + else: + message["content"] = f"{content}\n{message['content']}" + return message def add_or_update_system_message( @@ -148,10 +149,7 @@ def add_or_update_system_message( """ if messages and messages[0].get("role") == "system": - if append: - messages[0]["content"] = f"{messages[0]['content']}\n{content}" - else: - messages[0]["content"] = f"{content}\n{messages[0]['content']}" + messages[0] = update_message_content(messages[0], content, append) else: # Insert at the beginning messages.insert(0, {"role": "system", "content": content}) @@ -159,7 +157,7 @@ def add_or_update_system_message( return messages -def add_or_update_user_message(content: str, messages: list[dict]): +def add_or_update_user_message(content: str, messages: list[dict], append: bool = True): """ Adds a new user message at the end of the messages list or updates the existing user message at the end. 
@@ -170,7 +168,7 @@ def add_or_update_user_message(content: str, messages: list[dict]): """ if messages and messages[-1].get("role") == "user": - messages[-1]["content"] = f"{messages[-1]['content']}\n{content}" + messages[-1] = update_message_content(messages[-1], content, append) else: # Insert at the end messages.append({"role": "user", "content": content}) @@ -178,6 +176,16 @@ def add_or_update_user_message(content: str, messages: list[dict]): return messages +def prepend_to_first_user_message_content( + content: str, messages: list[dict] +) -> list[dict]: + for message in messages: + if message["role"] == "user": + message = update_message_content(message, content, append=False) + break + return messages + + def append_or_update_assistant_message(content: str, messages: list[dict]): """ Adds a new assistant message at the end of the messages list diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index 9090c38ce5..9399241853 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -1,7 +1,9 @@ import base64 +import hashlib import logging import mimetypes import sys +import urllib import uuid import json from datetime import datetime, timedelta @@ -9,6 +11,9 @@ from datetime import datetime, timedelta import re import fnmatch import time +import secrets +from cryptography.fernet import Fernet + import aiohttp from authlib.integrations.starlette_client import OAuth @@ -18,6 +23,7 @@ from fastapi import ( status, ) from starlette.responses import RedirectResponse +from typing import Optional from open_webui.models.auths import Auths @@ -56,11 +62,27 @@ from open_webui.env import ( WEBUI_AUTH_COOKIE_SAME_SITE, WEBUI_AUTH_COOKIE_SECURE, ENABLE_OAUTH_ID_TOKEN_COOKIE, + OAUTH_CLIENT_INFO_ENCRYPTION_KEY, ) from open_webui.utils.misc import parse_duration from open_webui.utils.auth import get_password_hash, create_token from open_webui.utils.webhook import post_webhook +from mcp.shared.auth import ( + 
OAuthClientMetadata, + OAuthMetadata, +) + + +class OAuthClientInformationFull(OAuthClientMetadata): + issuer: Optional[str] = None # URL of the OAuth server that issued this client + + client_id: str + client_secret: str | None = None + client_id_issued_at: int | None = None + client_secret_expires_at: int | None = None + + from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL) @@ -89,6 +111,42 @@ auth_manager_config.JWT_EXPIRES_IN = JWT_EXPIRES_IN auth_manager_config.OAUTH_UPDATE_PICTURE_ON_LOGIN = OAUTH_UPDATE_PICTURE_ON_LOGIN +FERNET = None + +if len(OAUTH_CLIENT_INFO_ENCRYPTION_KEY) != 44: + key_bytes = hashlib.sha256(OAUTH_CLIENT_INFO_ENCRYPTION_KEY.encode()).digest() + OAUTH_CLIENT_INFO_ENCRYPTION_KEY = base64.urlsafe_b64encode(key_bytes) +else: + OAUTH_CLIENT_INFO_ENCRYPTION_KEY = OAUTH_CLIENT_INFO_ENCRYPTION_KEY.encode() + +try: + FERNET = Fernet(OAUTH_CLIENT_INFO_ENCRYPTION_KEY) +except Exception as e: + log.error(f"Error initializing Fernet with provided key: {e}") + raise + + +def encrypt_data(data) -> str: + """Encrypt data for storage""" + try: + data_json = json.dumps(data) + encrypted = FERNET.encrypt(data_json.encode()).decode() + return encrypted + except Exception as e: + log.error(f"Error encrypting data: {e}") + raise + + +def decrypt_data(data: str): + """Decrypt data from storage""" + try: + decrypted = FERNET.decrypt(data.encode()).decode() + return json.loads(decrypted) + except Exception as e: + log.error(f"Error decrypting data: {e}") + raise + + def is_in_blocked_groups(group_name: str, groups: list) -> bool: """ Check if a group name matches any blocked pattern. 
@@ -133,6 +191,412 @@ def is_in_blocked_groups(group_name: str, groups: list) -> bool: return False +def get_parsed_and_base_url(server_url) -> tuple[urllib.parse.ParseResult, str]: + parsed = urllib.parse.urlparse(server_url) + base_url = f"{parsed.scheme}://{parsed.netloc}" + return parsed, base_url + + +def get_discovery_urls(server_url) -> list[str]: + urls = [] + parsed, base_url = get_parsed_and_base_url(server_url) + + urls.append( + urllib.parse.urljoin(base_url, "/.well-known/oauth-authorization-server") + ) + urls.append(urllib.parse.urljoin(base_url, "/.well-known/openid-configuration")) + + return urls + + +# TODO: Some OAuth providers require Initial Access Tokens (IATs) for dynamic client registration. +# This is not currently supported. +async def get_oauth_client_info_with_dynamic_client_registration( + request, + client_id: str, + oauth_server_url: str, + oauth_server_key: Optional[str] = None, +) -> OAuthClientInformationFull: + try: + oauth_server_metadata = None + oauth_server_metadata_url = None + + redirect_base_url = ( + str(request.app.state.config.WEBUI_URL or request.base_url) + ).rstrip("/") + + oauth_client_metadata = OAuthClientMetadata( + client_name="Open WebUI", + redirect_uris=[f"{redirect_base_url}/oauth/clients/{client_id}/callback"], + grant_types=["authorization_code", "refresh_token"], + response_types=["code"], + token_endpoint_auth_method="client_secret_post", + ) + + # Attempt to fetch OAuth server metadata to get registration endpoint & scopes + discovery_urls = get_discovery_urls(oauth_server_url) + for url in discovery_urls: + async with aiohttp.ClientSession() as session: + async with session.get( + url, ssl=AIOHTTP_CLIENT_SESSION_SSL + ) as oauth_server_metadata_response: + if oauth_server_metadata_response.status == 200: + try: + oauth_server_metadata = OAuthMetadata.model_validate( + await oauth_server_metadata_response.json() + ) + oauth_server_metadata_url = url + if ( + oauth_client_metadata.scope is None + and 
oauth_server_metadata.scopes_supported is not None
+                            ):
+                                oauth_client_metadata.scope = " ".join(
+                                    oauth_server_metadata.scopes_supported
+                                )
+                            break
+                        except Exception as e:
+                            log.error(f"Error parsing OAuth metadata from {url}: {e}")
+                            continue
+
+        registration_url = None
+        if oauth_server_metadata and oauth_server_metadata.registration_endpoint:
+            registration_url = str(oauth_server_metadata.registration_endpoint)
+        else:
+            _, base_url = get_parsed_and_base_url(oauth_server_url)
+            registration_url = urllib.parse.urljoin(base_url, "/register")
+
+        registration_data = oauth_client_metadata.model_dump(
+            exclude_none=True,
+            mode="json",
+            by_alias=True,
+        )
+
+        # Perform dynamic client registration and return client info
+        async with aiohttp.ClientSession() as session:
+            async with session.post(
+                registration_url, json=registration_data, ssl=AIOHTTP_CLIENT_SESSION_SSL
+            ) as oauth_client_registration_response:
+                try:
+                    registration_response_json = (
+                        await oauth_client_registration_response.json()
+                    )
+                    oauth_client_info = OAuthClientInformationFull.model_validate(
+                        {
+                            **registration_response_json,
+                            **{"issuer": oauth_server_metadata_url},
+                        }
+                    )
+                    log.info(
+                        f"Dynamic client registration successful at {registration_url}, client_id: {oauth_client_info.client_id}"
+                    )
+                    return oauth_client_info
+                except Exception as e:
+                    error_text = None
+                    try:
+                        error_text = await oauth_client_registration_response.text()
+                        log.error(
+                            f"Dynamic client registration failed at {registration_url}: {oauth_client_registration_response.status} - {error_text}"
+                        )
+                    except Exception:
+                        pass
+
+                    log.error(f"Error parsing client registration response: {e}")
+                    raise Exception(
+                        f"Dynamic client registration failed: {error_text}"
+                        if error_text
+                        else "Error parsing client registration response"
+                    )
+        raise Exception("Dynamic client registration failed")
+    except Exception as e:
+        log.error(f"Exception during dynamic client registration: {e}")
+        raise e
+
+
+class 
OAuthClientManager:
+    def __init__(self, app):
+        self.oauth = OAuth()
+        self.app = app
+        self.clients = {}
+
+    def add_client(self, client_id, oauth_client_info: OAuthClientInformationFull):
+        self.clients[client_id] = {
+            "client": self.oauth.register(
+                name=client_id,
+                client_id=oauth_client_info.client_id,
+                client_secret=oauth_client_info.client_secret,
+                client_kwargs=(
+                    {"scope": oauth_client_info.scope}
+                    if oauth_client_info.scope
+                    else {}
+                ),
+                server_metadata_url=(
+                    oauth_client_info.issuer if oauth_client_info.issuer else None
+                ),
+            ),
+            "client_info": oauth_client_info,
+        }
+        return self.clients[client_id]
+
+    def remove_client(self, client_id):
+        if client_id in self.clients:
+            del self.clients[client_id]
+            log.info(f"Removed OAuth client {client_id}")
+            return True
+
+    def get_client(self, client_id):
+        client = self.clients.get(client_id)
+        return client["client"] if client else None
+
+    def get_client_info(self, client_id):
+        client = self.clients.get(client_id)
+        return client["client_info"] if client else None
+
+    def get_server_metadata_url(self, client_id):
+        if client_id in self.clients:
+            client = self.clients[client_id]["client"]
+            return (
+                client.server_metadata_url
+                if hasattr(client, "server_metadata_url")
+                else None
+            )
+        return None
+
+    async def get_oauth_token(
+        self, user_id: str, client_id: str, force_refresh: bool = False
+    ):
+        """
+        Get a valid OAuth token for the user, automatically refreshing if needed.
+ + Args: + user_id: The user ID + client_id: The OAuth client ID (provider) + force_refresh: Force token refresh even if current token appears valid + + Returns: + dict: OAuth token data with access_token, or None if no valid token available + """ + try: + # Get the OAuth session + session = OAuthSessions.get_session_by_provider_and_user_id( + client_id, user_id + ) + if not session: + log.warning( + f"No OAuth session found for user {user_id}, client_id {client_id}" + ) + return None + + if force_refresh or datetime.now() + timedelta( + minutes=5 + ) >= datetime.fromtimestamp(session.expires_at): + log.debug( + f"Token refresh needed for user {user_id}, client_id {session.provider}" + ) + refreshed_token = await self._refresh_token(session) + if refreshed_token: + return refreshed_token + else: + log.warning( + f"Token refresh failed for user {user_id}, client_id {session.provider}, deleting session {session.id}" + ) + OAuthSessions.delete_session_by_id(session.id) + return None + return session.token + + except Exception as e: + log.error(f"Error getting OAuth token for user {user_id}: {e}") + return None + + async def _refresh_token(self, session) -> dict: + """ + Refresh an OAuth token if needed, with concurrency protection. 
+ + Args: + session: The OAuth session object + + Returns: + dict: Refreshed token data, or None if refresh failed + """ + try: + # Perform the actual refresh + refreshed_token = await self._perform_token_refresh(session) + + if refreshed_token: + # Update the session with new token data + session = OAuthSessions.update_session_by_id( + session.id, refreshed_token + ) + log.info(f"Successfully refreshed token for session {session.id}") + return session.token + else: + log.error(f"Failed to refresh token for session {session.id}") + return None + + except Exception as e: + log.error(f"Error refreshing token for session {session.id}: {e}") + return None + + async def _perform_token_refresh(self, session) -> dict: + """ + Perform the actual OAuth token refresh. + + Args: + session: The OAuth session object + + Returns: + dict: New token data, or None if refresh failed + """ + client_id = session.provider + token_data = session.token + + if not token_data.get("refresh_token"): + log.warning(f"No refresh token available for session {session.id}") + return None + + try: + client = self.get_client(client_id) + if not client: + log.error(f"No OAuth client found for provider {client_id}") + return None + + token_endpoint = None + async with aiohttp.ClientSession(trust_env=True) as session_http: + async with session_http.get( + self.get_server_metadata_url(client_id) + ) as r: + if r.status == 200: + openid_data = await r.json() + token_endpoint = openid_data.get("token_endpoint") + else: + log.error( + f"Failed to fetch OpenID configuration for client_id {client_id}" + ) + if not token_endpoint: + log.error(f"No token endpoint found for client_id {client_id}") + return None + + # Prepare refresh request + refresh_data = { + "grant_type": "refresh_token", + "refresh_token": token_data["refresh_token"], + "client_id": client.client_id, + } + if hasattr(client, "client_secret") and client.client_secret: + refresh_data["client_secret"] = client.client_secret + + # Make refresh 
request + async with aiohttp.ClientSession(trust_env=True) as session_http: + async with session_http.post( + token_endpoint, + data=refresh_data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ssl=AIOHTTP_CLIENT_SESSION_SSL, + ) as r: + if r.status == 200: + new_token_data = await r.json() + + # Merge with existing token data (preserve refresh_token if not provided) + if "refresh_token" not in new_token_data: + new_token_data["refresh_token"] = token_data[ + "refresh_token" + ] + + # Add timestamp for tracking + new_token_data["issued_at"] = datetime.now().timestamp() + + # Calculate expires_at if we have expires_in + if ( + "expires_in" in new_token_data + and "expires_at" not in new_token_data + ): + new_token_data["expires_at"] = int( + datetime.now().timestamp() + + new_token_data["expires_in"] + ) + + log.debug(f"Token refresh successful for client_id {client_id}") + return new_token_data + else: + error_text = await r.text() + log.error( + f"Token refresh failed for client_id {client_id}: {r.status} - {error_text}" + ) + return None + + except Exception as e: + log.error(f"Exception during token refresh for client_id {client_id}: {e}") + return None + + async def handle_authorize(self, request, client_id: str) -> RedirectResponse: + client = self.get_client(client_id) + if client is None: + raise HTTPException(404) + + client_info = self.get_client_info(client_id) + if client_info is None: + raise HTTPException(404) + + redirect_uri = ( + client_info.redirect_uris[0] if client_info.redirect_uris else None + ) + return await client.authorize_redirect(request, str(redirect_uri)) + + async def handle_callback(self, request, client_id: str, user_id: str, response): + client = self.get_client(client_id) + if client is None: + raise HTTPException(404) + + error_message = None + try: + token = await client.authorize_access_token(request) + if token: + try: + # Add timestamp for tracking + token["issued_at"] = datetime.now().timestamp() + + # 
Calculate expires_at if we have expires_in + if "expires_in" in token and "expires_at" not in token: + token["expires_at"] = ( + datetime.now().timestamp() + token["expires_in"] + ) + + # Clean up any existing sessions for this user/client_id first + sessions = OAuthSessions.get_sessions_by_user_id(user_id) + for session in sessions: + if session.provider == client_id: + OAuthSessions.delete_session_by_id(session.id) + + session = OAuthSessions.create_session( + user_id=user_id, + provider=client_id, + token=token, + ) + log.info( + f"Stored OAuth session server-side for user {user_id}, client_id {client_id}" + ) + except Exception as e: + error_message = "Failed to store OAuth session server-side" + log.error(f"Failed to store OAuth session server-side: {e}") + else: + error_message = "Failed to obtain OAuth token" + log.warning(error_message) + except Exception as e: + error_message = "OAuth callback error" + log.warning(f"OAuth callback error: {e}") + + redirect_url = ( + str(request.app.state.config.WEBUI_URL or request.base_url) + ).rstrip("/") + + if error_message: + log.debug(error_message) + redirect_url = f"{redirect_url}/?error={error_message}" + return RedirectResponse(url=redirect_url, headers=response.headers) + + response = RedirectResponse(url=redirect_url, headers=response.headers) + return response + + class OAuthManager: def __init__(self, app): self.oauth = OAuth() @@ -157,7 +621,7 @@ class OAuthManager: ) return None - def get_oauth_token( + async def get_oauth_token( self, user_id: str, session_id: str, force_refresh: bool = False ): """ @@ -186,13 +650,15 @@ class OAuthManager: log.debug( f"Token refresh needed for user {user_id}, provider {session.provider}" ) - refreshed_token = self._refresh_token(session) + refreshed_token = await self._refresh_token(session) if refreshed_token: return refreshed_token else: log.warning( - f"Token refresh failed for user {user_id}, provider {session.provider}" + f"Token refresh failed for user {user_id}, 
provider {session.provider}, deleting session {session.id}" ) + OAuthSessions.delete_session_by_id(session.id) + return None return session.token @@ -252,9 +718,10 @@ class OAuthManager: log.error(f"No OAuth client found for provider {provider}") return None + server_metadata_url = self.get_server_metadata_url(provider) token_endpoint = None async with aiohttp.ClientSession(trust_env=True) as session_http: - async with session_http.get(client.gserver_metadata_url) as r: + async with session_http.get(server_metadata_url) as r: if r.status == 200: openid_data = await r.json() token_endpoint = openid_data.get("token_endpoint") @@ -301,7 +768,7 @@ class OAuthManager: "expires_in" in new_token_data and "expires_at" not in new_token_data ): - new_token_data["expires_at"] = ( + new_token_data["expires_at"] = int( datetime.now().timestamp() + new_token_data["expires_in"] ) @@ -574,7 +1041,7 @@ class OAuthManager: raise HTTPException(404) # If the provider has a custom redirect URL, use that, otherwise automatically generate one redirect_uri = OAUTH_PROVIDERS[provider].get("redirect_uri") or request.url_for( - "oauth_callback", provider=provider + "oauth_login_callback", provider=provider ) client = self.get_client(provider) if client is None: @@ -791,9 +1258,9 @@ class OAuthManager: else ERROR_MESSAGES.DEFAULT("Error during OAuth process") ) - redirect_base_url = str(request.app.state.config.WEBUI_URL or request.base_url) - if redirect_base_url.endswith("/"): - redirect_base_url = redirect_base_url[:-1] + redirect_base_url = ( + str(request.app.state.config.WEBUI_URL or request.base_url) + ).rstrip("/") redirect_url = f"{redirect_base_url}/auth" if error_message: diff --git a/backend/open_webui/utils/tools.py b/backend/open_webui/utils/tools.py index bc90dd9d74..4c8289578e 100644 --- a/backend/open_webui/utils/tools.py +++ b/backend/open_webui/utils/tools.py @@ -96,92 +96,117 @@ async def get_tools( for tool_id in tool_ids: tool = Tools.get_tool_by_id(tool_id) if tool is 
None: + if tool_id.startswith("server:"): - server_id = tool_id.split(":")[1] + splits = tool_id.split(":") - tool_server_data = None - for server in await get_tool_servers(request): - if server["id"] == server_id: - tool_server_data = server - break + if len(splits) == 2: + type = "openapi" + server_id = splits[1] + elif len(splits) == 3: + type = splits[1] + server_id = splits[2] - if tool_server_data is None: - log.warning(f"Tool server data not found for {server_id}") + server_id_splits = server_id.split("|") + if len(server_id_splits) == 2: + server_id = server_id_splits[0] + function_names = server_id_splits[1].split(",") + + if type == "openapi": + + tool_server_data = None + for server in await get_tool_servers(request): + if server["id"] == server_id: + tool_server_data = server + break + + if tool_server_data is None: + log.warning(f"Tool server data not found for {server_id}") + continue + + tool_server_idx = tool_server_data.get("idx", 0) + tool_server_connection = ( + request.app.state.config.TOOL_SERVER_CONNECTIONS[ + tool_server_idx + ] + ) + + specs = tool_server_data.get("specs", []) + for spec in specs: + function_name = spec["name"] + + auth_type = tool_server_connection.get("auth_type", "bearer") + + cookies = {} + headers = {} + + if auth_type == "bearer": + headers["Authorization"] = ( + f"Bearer {tool_server_connection.get('key', '')}" + ) + elif auth_type == "none": + # No authentication + pass + elif auth_type == "session": + cookies = request.cookies + headers["Authorization"] = ( + f"Bearer {request.state.token.credentials}" + ) + elif auth_type == "system_oauth": + cookies = request.cookies + oauth_token = extra_params.get("__oauth_token__", None) + if oauth_token: + headers["Authorization"] = ( + f"Bearer {oauth_token.get('access_token', '')}" + ) + + headers["Content-Type"] = "application/json" + + def make_tool_function( + function_name, tool_server_data, headers + ): + async def tool_function(**kwargs): + return await 
execute_tool_server( + url=tool_server_data["url"], + headers=headers, + cookies=cookies, + name=function_name, + params=kwargs, + server_data=tool_server_data, + ) + + return tool_function + + tool_function = make_tool_function( + function_name, tool_server_data, headers + ) + + callable = get_async_tool_function_and_apply_extra_params( + tool_function, + {}, + ) + + tool_dict = { + "tool_id": tool_id, + "callable": callable, + "spec": spec, + # Misc info + "type": "external", + } + + # Handle function name collisions + while function_name in tools_dict: + log.warning( + f"Tool {function_name} already exists in another tools!" + ) + # Prepend server ID to function name + function_name = f"{server_id}_{function_name}" + + tools_dict[function_name] = tool_dict + + else: continue - tool_server_idx = tool_server_data.get("idx", 0) - tool_server_connection = ( - request.app.state.config.TOOL_SERVER_CONNECTIONS[tool_server_idx] - ) - - specs = tool_server_data.get("specs", []) - for spec in specs: - function_name = spec["name"] - - auth_type = tool_server_connection.get("auth_type", "bearer") - - cookies = {} - headers = {} - - if auth_type == "bearer": - headers["Authorization"] = ( - f"Bearer {tool_server_connection.get('key', '')}" - ) - elif auth_type == "none": - # No authentication - pass - elif auth_type == "session": - cookies = request.cookies - headers["Authorization"] = ( - f"Bearer {request.state.token.credentials}" - ) - elif auth_type == "system_oauth": - cookies = request.cookies - oauth_token = extra_params.get("__oauth_token__", None) - if oauth_token: - headers["Authorization"] = ( - f"Bearer {oauth_token.get('access_token', '')}" - ) - - headers["Content-Type"] = "application/json" - - def make_tool_function(function_name, tool_server_data, headers): - async def tool_function(**kwargs): - return await execute_tool_server( - url=tool_server_data["url"], - headers=headers, - cookies=cookies, - name=function_name, - params=kwargs, - 
server_data=tool_server_data, - ) - - return tool_function - - tool_function = make_tool_function( - function_name, tool_server_data, headers - ) - - callable = get_async_tool_function_and_apply_extra_params( - tool_function, - {}, - ) - - tool_dict = { - "tool_id": tool_id, - "callable": callable, - "spec": spec, - } - - # Handle function name collisions - while function_name in tools_dict: - log.warning( - f"Tool {function_name} already exists in another tools!" - ) - # Prepend server ID to function name - function_name = f"{server_id}_{function_name}" - - tools_dict[function_name] = tool_dict else: continue else: @@ -577,7 +602,10 @@ async def get_tool_servers_data(servers: List[Dict[str, Any]]) -> List[Dict[str, # Prepare list of enabled servers along with their original index server_entries = [] for idx, server in enumerate(servers): - if server.get("config", {}).get("enable"): + if ( + server.get("config", {}).get("enable") + and server.get("type", "openapi") == "openapi" + ): # Path (to OpenAPI spec URL) can be either a full URL or a path to append to the base URL openapi_path = server.get("path", "openapi.json") full_url = get_tool_server_url(server.get("url"), openapi_path) @@ -646,7 +674,7 @@ async def execute_tool_server( name: str, params: Dict[str, Any], server_data: Dict[str, Any], -) -> Any: +) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]: error = None try: openapi = server_data.get("openapi", {}) @@ -718,6 +746,7 @@ async def execute_tool_server( headers=headers, cookies=cookies, ssl=AIOHTTP_CLIENT_SESSION_TOOL_SERVER_SSL, + allow_redirects=False, ) as response: if response.status >= 400: text = await response.text() @@ -728,13 +757,15 @@ async def execute_tool_server( except Exception: response_data = await response.text() - return response_data + response_headers = response.headers + return (response_data, response_headers) else: async with request_method( final_url, headers=headers, cookies=cookies, 
ssl=AIOHTTP_CLIENT_SESSION_TOOL_SERVER_SSL, + allow_redirects=False, ) as response: if response.status >= 400: text = await response.text() @@ -745,12 +776,13 @@ async def execute_tool_server( except Exception: response_data = await response.text() - return response_data + response_headers = response.headers + return (response_data, response_headers) except Exception as err: error = str(err) log.exception(f"API Request Error: {error}") - return {"error": error} + return ({"error": error}, None) def get_tool_server_url(url: Optional[str], path: str) -> str: diff --git a/backend/requirements.txt b/backend/requirements.txt index 5871015075..27e0c24cb7 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -2,58 +2,66 @@ fastapi==0.115.7 uvicorn[standard]==0.35.0 pydantic==2.11.7 python-multipart==0.0.20 +itsdangerous==2.2.0 python-socketio==5.13.0 python-jose==3.4.0 passlib[bcrypt]==1.7.4 cryptography +bcrypt==4.3.0 +argon2-cffi==25.1.0 +PyJWT[crypto]==2.10.1 +authlib==1.6.3 -requests==2.32.4 +requests==2.32.5 aiohttp==3.12.15 async-timeout aiocache aiofiles starlette-compress==1.6.0 httpx[socks,http2,zstd,cli,brotli]==0.28.1 +starsessions[redis]==2.2.1 sqlalchemy==2.0.38 alembic==1.14.0 peewee==3.18.1 peewee-migrate==1.12.2 -psycopg2-binary==2.9.10 -pgvector==0.4.1 -PyMySQL==1.1.1 -bcrypt==4.3.0 - -pymongo -redis -boto3==1.40.5 - -argon2-cffi==25.1.0 -APScheduler==3.10.4 pycrdt==0.12.25 +redis +pymongo + +psycopg2-binary==2.9.10 +pgvector==0.4.1 + +PyMySQL==1.1.1 +boto3==1.40.5 + +APScheduler==3.10.4 RestrictedPython==8.0 loguru==0.7.3 asgiref==3.8.1 # AI libraries +tiktoken +mcp==1.14.1 + openai anthropic -google-genai==1.32.0 +google-genai==1.38.0 google-generativeai==0.8.5 -tiktoken -langchain==0.3.26 -langchain-community==0.3.27 +langchain==0.3.27 +langchain-community==0.3.29 fake-useragent==2.2.0 chromadb==1.0.20 +opensearch-py==2.8.0 + pymilvus==2.5.0 qdrant-client==1.14.3 -opensearch-py==2.8.0 playwright==1.49.1 # Caution: version must match 
docker-compose.playwright.yaml elasticsearch==9.1.0 pinecone==6.0.2 @@ -61,12 +69,12 @@ oracledb==3.2.0 av==14.0.1 # Caution: Set due to FATAL FIPS SELFTEST FAILURE, see discussion https://github.com/open-webui/open-webui/discussions/15720 transformers -sentence-transformers==4.1.0 +sentence-transformers==5.1.1 accelerate -colbert-ai==0.2.21 pyarrow==20.0.0 # fix: pin pyarrow version to 20 for rpi compatibility #15897 einops==0.8.1 +colbert-ai==0.2.21 ftfy==6.2.3 pypdf==6.0.0 @@ -94,11 +102,8 @@ rapidocr-onnxruntime==1.4.4 rank-bm25==0.2.2 onnxruntime==1.20.1 - faster-whisper==1.1.1 -PyJWT[crypto]==2.10.1 -authlib==1.6.3 black==25.1.0 youtube-transcript-api==1.2.2 @@ -120,7 +125,7 @@ pytest-docker~=3.1.1 googleapis-common-protos==1.70.0 google-cloud-storage==2.19.0 -azure-identity==1.23.0 +azure-identity==1.25.0 azure-storage-blob==12.24.1 diff --git a/package-lock.json b/package-lock.json index 3909c68604..c6d6fc47af 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.6.30", + "version": "0.6.31", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.6.30", + "version": "0.6.31", "dependencies": { "@azure/msal-browser": "^4.5.0", "@codemirror/lang-javascript": "^6.2.2", @@ -23,7 +23,7 @@ "@tiptap/core": "^3.0.7", "@tiptap/extension-bubble-menu": "^2.26.1", "@tiptap/extension-code-block-lowlight": "^3.0.7", - "@tiptap/extension-drag-handle": "^3.0.7", + "@tiptap/extension-drag-handle": "^3.4.5", "@tiptap/extension-file-handler": "^3.0.7", "@tiptap/extension-floating-menu": "^2.26.1", "@tiptap/extension-highlight": "^3.3.0", @@ -39,6 +39,7 @@ "@tiptap/starter-kit": "^3.0.7", "@tiptap/suggestion": "^3.4.2", "@xyflow/svelte": "^0.1.19", + "alpinejs": "^3.15.0", "async": "^3.2.5", "bits-ui": "^0.21.15", "chart.js": "^4.5.0", @@ -3383,9 +3384,9 @@ } }, "node_modules/@tiptap/extension-collaboration": { - "version": "3.0.7", - "resolved": 
"https://registry.npmjs.org/@tiptap/extension-collaboration/-/extension-collaboration-3.0.7.tgz", - "integrity": "sha512-so59vQCAS1vy6k86byk96fYvAPM5w8u8/Yp3jKF1LPi9LH4wzS4hGnOP/dEbedxPU48an9WB1lSOczSKPECJaQ==", + "version": "3.4.5", + "resolved": "https://registry.npmjs.org/@tiptap/extension-collaboration/-/extension-collaboration-3.4.5.tgz", + "integrity": "sha512-JyPXTYkYi2XzUWsmObv2cogMrs7huAvfq6l7d5hAwsU2FnA1vMycaa48N4uekogySP6VBkiQNDf9B4T09AwwqA==", "license": "MIT", "peer": true, "funding": { @@ -3393,8 +3394,8 @@ "url": "https://github.com/sponsors/ueberdosis" }, "peerDependencies": { - "@tiptap/core": "^3.0.7", - "@tiptap/pm": "^3.0.7", + "@tiptap/core": "^3.4.5", + "@tiptap/pm": "^3.4.5", "@tiptap/y-tiptap": "^3.0.0-beta.3", "yjs": "^13" } @@ -3413,9 +3414,9 @@ } }, "node_modules/@tiptap/extension-drag-handle": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/@tiptap/extension-drag-handle/-/extension-drag-handle-3.0.7.tgz", - "integrity": "sha512-rm8+0kPz5C5JTp4f1QY61Qd5d7zlJAxLeJtOvgC9RCnrNG1F7LCsmOkvy5fsU6Qk2YCCYOiSSMC4S4HKPrUJhw==", + "version": "3.4.5", + "resolved": "https://registry.npmjs.org/@tiptap/extension-drag-handle/-/extension-drag-handle-3.4.5.tgz", + "integrity": "sha512-177hQ9lMQYJz+SuCg8eA47MB2tn3G3MGBJ5+3PNl5Bs4WQukR9uHpxdR+bH00/LedwxrlNlglMa5Hirrx9odMQ==", "license": "MIT", "dependencies": { "@floating-ui/dom": "^1.6.13" @@ -3425,10 +3426,10 @@ "url": "https://github.com/sponsors/ueberdosis" }, "peerDependencies": { - "@tiptap/core": "^3.0.7", - "@tiptap/extension-collaboration": "^3.0.7", - "@tiptap/extension-node-range": "^3.0.7", - "@tiptap/pm": "^3.0.7", + "@tiptap/core": "^3.4.5", + "@tiptap/extension-collaboration": "^3.4.5", + "@tiptap/extension-node-range": "^3.4.5", + "@tiptap/pm": "^3.4.5", "@tiptap/y-tiptap": "^3.0.0-beta.3" } }, @@ -3642,9 +3643,9 @@ } }, "node_modules/@tiptap/extension-node-range": { - "version": "3.0.7", - "resolved": 
"https://registry.npmjs.org/@tiptap/extension-node-range/-/extension-node-range-3.0.7.tgz", - "integrity": "sha512-cHViNqtOUD9CLJxEj28rcj8tb8RYQZ7kwmtSvIye84Y3MJIzigRm4IUBNNOYnZfq5YAZIR97WKcJeFz3EU1VPg==", + "version": "3.4.5", + "resolved": "https://registry.npmjs.org/@tiptap/extension-node-range/-/extension-node-range-3.4.5.tgz", + "integrity": "sha512-mHCjdJZX8DZCpnw9wBqioanANy6tRoy20/OcJxMW1T7naeRCuCU4sFjwO37yb/tmYk1BQA2/L1/H2r0fVoZwtA==", "license": "MIT", "peer": true, "funding": { @@ -3652,8 +3653,8 @@ "url": "https://github.com/sponsors/ueberdosis" }, "peerDependencies": { - "@tiptap/core": "^3.0.7", - "@tiptap/pm": "^3.0.7" + "@tiptap/core": "^3.4.5", + "@tiptap/pm": "^3.4.5" } }, "node_modules/@tiptap/extension-ordered-list": { @@ -4569,6 +4570,21 @@ "@types/estree": "^1.0.0" } }, + "node_modules/@vue/reactivity": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.1.5.tgz", + "integrity": "sha512-1tdfLmNjWG6t/CsPldh+foumYFo3cpyCHgBYQ34ylaMsJ+SNHQ1kApMIa8jN+i593zQuaw3AdWH0nJTARzCFhg==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.1.5" + } + }, + "node_modules/@vue/shared": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.1.5.tgz", + "integrity": "sha512-oJ4F3TnvpXaQwZJNF3ZK+kLPHKarDmJjJ6jyzVNDKH9md1dptjC7lWR//jrGuLdek/U6iltWxqAnYOu8gCiOvA==", + "license": "MIT" + }, "node_modules/@webreflection/fetch": { "version": "0.1.5", "resolved": "https://registry.npmjs.org/@webreflection/fetch/-/fetch-0.1.5.tgz", @@ -4672,6 +4688,15 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/alpinejs": { + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/alpinejs/-/alpinejs-3.15.0.tgz", + "integrity": "sha512-lpokA5okCF1BKh10LG8YjqhfpxyHBk4gE7boIgVHltJzYoM7O9nK3M7VlntLEJGsVmu7U/RzUWajmHREGT38Eg==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "~3.1.1" + } + }, "node_modules/amator": { "version": "1.1.0", "resolved": 
"https://registry.npmjs.org/amator/-/amator-1.1.0.tgz", diff --git a/package.json b/package.json index f577cfdcb6..67f7b6ddda 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.6.30", + "version": "0.6.31", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", @@ -67,7 +67,7 @@ "@tiptap/core": "^3.0.7", "@tiptap/extension-bubble-menu": "^2.26.1", "@tiptap/extension-code-block-lowlight": "^3.0.7", - "@tiptap/extension-drag-handle": "^3.0.7", + "@tiptap/extension-drag-handle": "^3.4.5", "@tiptap/extension-file-handler": "^3.0.7", "@tiptap/extension-floating-menu": "^2.26.1", "@tiptap/extension-highlight": "^3.3.0", @@ -83,6 +83,7 @@ "@tiptap/starter-kit": "^3.0.7", "@tiptap/suggestion": "^3.4.2", "@xyflow/svelte": "^0.1.19", + "alpinejs": "^3.15.0", "async": "^3.2.5", "bits-ui": "^0.21.15", "chart.js": "^4.5.0", diff --git a/pyproject.toml b/pyproject.toml index 4bbaf85b6b..7378d3d287 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,23 +10,25 @@ dependencies = [ "uvicorn[standard]==0.35.0", "pydantic==2.11.7", "python-multipart==0.0.20", + "itsdangerous==2.2.0", "python-socketio==5.13.0", "python-jose==3.4.0", "passlib[bcrypt]==1.7.4", "cryptography", "bcrypt==4.3.0", - "argon2-cffi==23.1.0", + "argon2-cffi==25.1.0", "PyJWT[crypto]==2.10.1", "authlib==1.6.3", - "requests==2.32.4", + "requests==2.32.5", "aiohttp==3.12.15", "async-timeout", "aiocache", "aiofiles", "starlette-compress==1.6.0", "httpx[socks,http2,zstd,cli,brotli]==0.28.1", + "starsessions[redis]==2.2.1", "sqlalchemy==2.0.38", "alembic==1.14.0", @@ -46,20 +48,22 @@ dependencies = [ "asgiref==3.8.1", "tiktoken", + "mcp==1.14.1", + "openai", "anthropic", - "google-genai==1.32.0", + "google-genai==1.38.0", "google-generativeai==0.8.5", - "langchain==0.3.26", - "langchain-community==0.3.27", + "langchain==0.3.27", + "langchain-community==0.3.29", "fake-useragent==2.2.0", "chromadb==1.0.20", "opensearch-py==2.8.0", 
"transformers", - "sentence-transformers==4.1.0", + "sentence-transformers==5.1.1", "accelerate", "pyarrow==20.0.0", "einops==0.8.1", @@ -108,7 +112,7 @@ dependencies = [ "googleapis-common-protos==1.70.0", "google-cloud-storage==2.19.0", - "azure-identity==1.20.0", + "azure-identity==1.25.0", "azure-storage-blob==12.24.1", "ldap3==2.9.1", diff --git a/src/app.css b/src/app.css index af030d8350..e8f4ee137b 100644 --- a/src/app.css +++ b/src/app.css @@ -116,7 +116,7 @@ li p { ::-webkit-scrollbar-thumb { --tw-border-opacity: 1; - background-color: rgba(215, 215, 215, 0.8); + background-color: rgba(215, 215, 215, 0.6); border-color: rgba(255, 255, 255, var(--tw-border-opacity)); border-radius: 9999px; border-width: 1px; @@ -124,12 +124,12 @@ li p { /* Dark theme scrollbar styles */ .dark ::-webkit-scrollbar-thumb { - background-color: rgba(67, 67, 67, 0.8); /* Darker color for dark theme */ + background-color: rgba(67, 67, 67, 0.6); /* Darker color for dark theme */ border-color: rgba(0, 0, 0, var(--tw-border-opacity)); } ::-webkit-scrollbar { - height: 0.6rem; + height: 0.4rem; width: 0.4rem; } @@ -661,3 +661,112 @@ body { background: #171717; color: #eee; } + +/* Position the handle relative to each LI */ +.pm-li--with-handle { + position: relative; + margin-left: 12px; /* make space for the handle */ +} + +.tiptap ul[data-type='taskList'] .pm-list-drag-handle { + margin-left: 0px; +} + +/* The drag handle itself */ +.pm-list-drag-handle { + position: absolute; + left: -36px; /* pull into the left gutter */ + top: 1px; + width: 18px; + height: 18px; + display: inline-flex; + align-items: center; + justify-content: center; + font-size: 12px; + line-height: 1; + border-radius: 4px; + cursor: grab; + user-select: none; + opacity: 0.35; + transition: + opacity 120ms ease, + background 120ms ease; +} + +.tiptap ul[data-type='taskList'] .pm-list-drag-handle { + left: -16px; /* pull into the left gutter more to avoid the checkbox */ +} + +.pm-list-drag-handle:active { + 
cursor: grabbing; +} +.pm-li--with-handle:hover > .pm-list-drag-handle { + opacity: 1; +} +.pm-list-drag-handle:hover { + background: rgba(0, 0, 0, 0.06); +} + +:root { + --pm-accent: color-mix(in oklab, Highlight 70%, transparent); + --pm-fill-target: color-mix(in oklab, Highlight 26%, transparent); + --pm-fill-ancestor: color-mix(in oklab, Highlight 16%, transparent); +} + +.pm-li-drop-before, +.pm-li-drop-after, +.pm-li-drop-into, +.pm-li-drop-outdent { + position: relative; +} + +/* BEFORE/AFTER lines */ +.pm-li-drop-before::before, +.pm-li-drop-after::after { + content: ''; + position: absolute; + left: 0; + right: 0; + height: 3px; + background: var(--pm-accent); + pointer-events: none; +} +.pm-li-drop-before::before { + top: -2px; +} +.pm-li-drop-after::after { + bottom: -2px; +} + +.pm-li-drop-before, +.pm-li-drop-after, +.pm-li-drop-into, +.pm-li-drop-outdent { + background: var(--pm-fill-target); + border-radius: 6px; +} + +.pm-li-drop-outdent::before { + content: ''; + position: absolute; + inset-block: 0; + inset-inline-start: 0; + width: 3px; + background: color-mix(in oklab, Highlight 35%, transparent); +} + +.pm-li--with-handle:has(.pm-li-drop-before), +.pm-li--with-handle:has(.pm-li-drop-after), +.pm-li--with-handle:has(.pm-li-drop-into), +.pm-li--with-handle:has(.pm-li-drop-outdent) { + background: var(--pm-fill-ancestor); + border-radius: 6px; +} + +.pm-li-drop-before, +.pm-li-drop-after, +.pm-li-drop-into, +.pm-li-drop-outdent { + position: relative; + z-index: 0; +} diff --git a/src/lib/apis/chats/index.ts b/src/lib/apis/chats/index.ts index b1e7d5f23b..59d8600771 100644 --- a/src/lib/apis/chats/index.ts +++ b/src/lib/apis/chats/index.ts @@ -77,7 +77,11 @@ export const importChat = async ( return res; }; -export const getChatList = async (token: string = '', page: number | null = null) => { +export const getChatList = async ( + token: string = '', + page: number | null = null, + include_folders: boolean = false +) => { let error = null; const 
searchParams = new URLSearchParams(); @@ -85,6 +89,10 @@ export const getChatList = async (token: string = '', page: number | null = null searchParams.append('page', `${page}`); } + if (include_folders) { + searchParams.append('include_folders', 'true'); + } + const res = await fetch(`${WEBUI_API_BASE_URL}/chats/?${searchParams.toString()}`, { method: 'GET', headers: { diff --git a/src/lib/apis/configs/index.ts b/src/lib/apis/configs/index.ts index ef983e63bf..c6cfdd2b2b 100644 --- a/src/lib/apis/configs/index.ts +++ b/src/lib/apis/configs/index.ts @@ -1,4 +1,4 @@ -import { WEBUI_API_BASE_URL } from '$lib/constants'; +import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants'; import type { Banner } from '$lib/types'; export const importConfig = async (token: string, config) => { @@ -202,6 +202,52 @@ export const verifyToolServerConnection = async (token: string, connection: obje return res; }; +type RegisterOAuthClientForm = { + url: string; + client_id: string; + client_name?: string; +}; + +export const registerOAuthClient = async ( + token: string, + formData: RegisterOAuthClientForm, + type: null | string = null +) => { + let error = null; + + const searchParams = type ? `?type=${type}` : ''; + const res = await fetch(`${WEBUI_API_BASE_URL}/configs/oauth/clients/register${searchParams}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${token}` + }, + body: JSON.stringify({ + ...formData + }) + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + console.error(err); + error = err.detail; + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + +export const getOAuthClientAuthorizationUrl = (clientId: string, type: null | string = null) => { + const oauthClientId = type ? 
`${type}:${clientId}` : clientId; + return `${WEBUI_BASE_URL}/oauth/clients/${oauthClientId}/authorize`; +}; + export const getCodeExecutionConfig = async (token: string) => { let error = null; diff --git a/src/lib/apis/index.ts b/src/lib/apis/index.ts index e36eeba12e..937e7cec81 100644 --- a/src/lib/apis/index.ts +++ b/src/lib/apis/index.ts @@ -493,18 +493,25 @@ export const executeToolServer = async ( throw new Error(`HTTP error! Status: ${res.status}. Message: ${resText}`); } - let responseData; - try { - responseData = await res.json(); - } catch (err) { - responseData = await res.text(); - } + // make a clone of res and extract headers + const responseHeaders = {}; + res.headers.forEach((value, key) => { + responseHeaders[key] = value; + }); - return responseData; + const text = await res.text(); + let responseData; + + try { + responseData = JSON.parse(text); + } catch { + responseData = text; + } + return [responseData, responseHeaders]; } catch (err: any) { error = err.message; console.error('API Request Error:', error); - return { error }; + return [{ error }, null]; } }; diff --git a/src/lib/components/AddServerModal.svelte b/src/lib/components/AddToolServerModal.svelte similarity index 66% rename from src/lib/components/AddServerModal.svelte rename to src/lib/components/AddToolServerModal.svelte index 4a407b8279..c6894ddeeb 100644 --- a/src/lib/components/AddServerModal.svelte +++ b/src/lib/components/AddToolServerModal.svelte @@ -13,7 +13,7 @@ import Switch from '$lib/components/common/Switch.svelte'; import Tags from './common/Tags.svelte'; import { getToolServerData } from '$lib/apis'; - import { verifyToolServerConnection } from '$lib/apis/configs'; + import { verifyToolServerConnection, registerOAuthClient } from '$lib/apis/configs'; import AccessControl from './workspace/common/AccessControl.svelte'; import Spinner from '$lib/components/common/Spinner.svelte'; import XMark from '$lib/components/icons/XMark.svelte'; @@ -30,6 +30,8 @@ let url = ''; 
let path = 'openapi.json'; + let type = 'openapi'; // 'openapi', 'mcp' + let auth_type = 'bearer'; let key = ''; @@ -39,10 +41,47 @@ let name = ''; let description = ''; - let enable = true; + let oauthClientInfo = null; + let enable = true; let loading = false; + const registerOAuthClientHandler = async () => { + if (url === '') { + toast.error($i18n.t('Please enter a valid URL')); + return; + } + + if (id === '') { + toast.error($i18n.t('Please enter a valid ID')); + return; + } + + const res = await registerOAuthClient( + localStorage.token, + { + url: url, + client_id: id + }, + 'mcp' + ).catch((err) => { + toast.error($i18n.t('Registration failed')); + return null; + }); + + if (res) { + toast.warning( + $i18n.t( + 'Please save the connection to persist the OAuth client information and do not change the ID' + ) + ); + toast.success($i18n.t('Registration successful')); + + console.debug('Registration successful', res); + oauthClientInfo = res?.oauth_client_info ?? null; + } + }; + const verifyHandler = async () => { if (url === '') { toast.error($i18n.t('Please enter a valid URL')); @@ -70,6 +109,7 @@ const res = await verifyToolServerConnection(localStorage.token, { url, path, + type, auth_type, key, config: { @@ -97,10 +137,22 @@ // remove trailing slash from url url = url.replace(/\/$/, ''); + if (id.includes(':') || id.includes('|')) { + toast.error($i18n.t('ID cannot contain ":" or "|" characters')); + loading = false; + return; + } + + if (type === 'mcp' && auth_type === 'oauth_2.1' && !oauthClientInfo) { + toast.error($i18n.t('Please register the OAuth client')); + loading = false; + return; + } const connection = { url, path, + type, auth_type, key, config: { @@ -110,7 +162,8 @@ info: { id: id, name: name, - description: description + description: description, + ...(oauthClientInfo ? 
{ oauth_client_info: oauthClientInfo } : {}) } }; @@ -119,14 +172,18 @@ loading = false; show = false; + // reset form url = ''; path = 'openapi.json'; + type = 'openapi'; + key = ''; auth_type = 'bearer'; id = ''; name = ''; description = ''; + oauthClientInfo = null; enable = true; accessControl = null; @@ -137,12 +194,14 @@ url = connection.url; path = connection?.path ?? 'openapi.json'; + type = connection?.type ?? 'openapi'; auth_type = connection?.auth_type ?? 'bearer'; key = connection?.key ?? ''; id = connection.info?.id ?? ''; name = connection.info?.name ?? ''; description = connection.info?.description ?? ''; + oauthClientInfo = connection.info?.oauth_client_info ?? null; enable = connection.config?.enable ?? true; accessControl = connection.config?.access_control ?? null; @@ -189,6 +248,31 @@ }} >
+ {#if !direct} +
+
+
{$i18n.t('Type')}
+ +
+ +
+
+
+ {/if} +
@@ -243,38 +327,85 @@
-
- - -
+ {#if ['', 'openapi'].includes(type)} +
+ + +
+ {/if}
-
- {$i18n.t(`WebUI will make requests to "{{url}}"`, { - url: path.includes('://') ? path : `${url}${path.startsWith('/') ? '' : '/'}${path}` - })} -
+ {#if ['', 'openapi'].includes(type)} +
+ {$i18n.t(`WebUI will make requests to "{{url}}"`, { + url: path.includes('://') + ? path + : `${url}${path.startsWith('/') ? '' : '/'}${path}` + })} +
+ {/if}
- +
+
+
+ {$i18n.t('Auth')} +
+
+ + {#if auth_type === 'oauth_2.1'} +
+
+ + + +
+ + {#if !oauthClientInfo} +
+ {$i18n.t('Not Registered')} +
+ {:else} +
+ {$i18n.t('Registered')} +
+ {/if} +
+ {/if} +
@@ -290,6 +421,9 @@ {#if !direct} + {#if type === 'mcp'} + + {/if} {/if}
@@ -319,6 +453,12 @@ > {$i18n.t('Forwards system user OAuth access token to authenticate')}
+ {:else if auth_type === 'oauth_2.1'} +
+ {$i18n.t('Uses OAuth 2.1 Dynamic Client Registration')} +
{/if}
@@ -334,9 +474,12 @@ for="enter-id" class={`mb-0.5 text-xs ${($settings?.highContrastMode ?? false) ? 'text-gray-800 dark:text-gray-100' : 'text-gray-500'}`} >{$i18n.t('ID')} - {$i18n.t('Optional')} + + {#if type !== 'mcp'} + {$i18n.t('Optional')} + {/if}
@@ -347,6 +490,7 @@ bind:value={id} placeholder={$i18n.t('Enter ID')} autocomplete="off" + required={type === 'mcp'} />
@@ -396,13 +540,32 @@
-
+
{/if}
+ {#if type === 'mcp'} +
+ + {$i18n.t('Warning')}: + + {$i18n.t( + 'MCP support is experimental and its specification changes often, which can lead to incompatibilities. OpenAPI specification support is directly maintained by the Open WebUI team, making it the more reliable option for compatibility.' + )} + + {$i18n.t('Read more →')} +
+ {/if} +
{#if edit}