Compare commits

...

1 Commits

Author SHA1 Message Date
LUIS NOVO d962c68288 remove streamlit app 2025-10-18 22:56:14 -03:00
26 changed files with 3 additions and 11886 deletions

View File

@ -1,30 +0,0 @@
# Streamlit configuration for the Open Notebook UI.
[server]
port = 8502
maxMessageSize = 500
# fileWatcherType = "none"
# Browser port must match server.port so generated links resolve correctly.
[browser]
serverPort = 8502
[theme]
# # The preset Streamlit theme that your custom theme inherits from.
# # One of "light" or "dark".
base = "light"
# # Primary accent color for interactive elements.
# primaryColor =
# # Background color for the main content area.
# backgroundColor =
# # Background color used for the sidebar and most interactive widgets.
# secondaryBackgroundColor =
# # Color used for almost all text.
# textColor =
# # Font family for all text in the app, except code blocks. One of "sans serif",
# # "serif", or "monospace".
# font =

View File

@ -1,26 +0,0 @@
# Entry page that routes an `object_id` query parameter to the matching
# detail panel (note, source, or source insight).
import nest_asyncio
import streamlit as st
from dotenv import load_dotenv
# Patch the event loop before importing page components — presumably they
# create async clients at import time; TODO confirm ordering is required.
nest_asyncio.apply()
from pages.components import note_panel, source_insight_panel, source_panel
from pages.stream_app.utils import setup_page
load_dotenv()
setup_page("📒 Open Notebook", sidebar_state="collapsed")
# Without an object_id there is nothing to render; bounce to the notebooks
# list and halt this script run.
if "object_id" not in st.query_params:
    st.switch_page("pages/2_📒_Notebooks.py")
    st.stop()
object_id = st.query_params["object_id"]
# Record ids look like "<table>:<key>"; the table prefix selects the panel.
obj_type = object_id.split(":")[0]
if obj_type == "note":
    note_panel(object_id)
elif obj_type == "source":
    source_panel(object_id)
elif obj_type == "source_insight":
    source_insight_panel(object_id)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,306 +0,0 @@
# Settings page: lets the user pick default content-processing engines,
# the embedding policy, file cleanup, and YouTube transcript languages.
import os
from typing import Literal, cast
import streamlit as st
from api.settings_service import settings_service
from pages.stream_app.utils import setup_page
setup_page("⚙️ Settings")
st.header("⚙️ Settings")
# Current persisted settings; widgets below are seeded from this object.
content_settings = settings_service.get_settings()
with st.container(border=True):
    st.markdown("**Content Processing Engine for Documents**")
    # Preselect the stored engine; fall back to index 0 ("auto") when unset.
    default_content_processing_engine_doc = st.selectbox(
        "Default Content Processing Engine for Documents",
        ["auto", "docling", "simple"],
        index=(
            ["auto", "docling", "simple"].index(
                content_settings.default_content_processing_engine_doc
            )
            if content_settings.default_content_processing_engine_doc
            else 0
        ),
    )
    with st.expander("Help me choose"):
        st.markdown(
            "- Docling is a little slower but more accurate, specially if the documents contain tables and images.\n- Simple will extract any content from the document without formatiing it. It's ok for simple documents, but will lose quality in complex ones.\n- Auto (recommended) will try to process through docling and default to simple."
        )
with st.container(border=True):
    st.markdown("**Content Processing Engine for URLs**")
    # Provider availability is inferred purely from env vars being set.
    firecrawl_enabled = os.getenv("FIRECRAWL_API_KEY") is not None
    jina_enabled = os.getenv("JINA_API_KEY") is not None
    default_content_processing_engine_url = st.selectbox(
        "Default Content Processing Engine for URLs",
        ["auto", "firecrawl", "jina", "simple"],
        index=(
            ["auto", "firecrawl", "jina", "simple"].index(
                content_settings.default_content_processing_engine_url
            )
            if content_settings.default_content_processing_engine_url
            else 0
        ),
    )
    # Warn when the chosen engine needs an API key that is not configured.
    if not firecrawl_enabled and default_content_processing_engine_url in [
        "firecrawl",
        "auto",
    ]:
        st.warning(
            "Firecrawl API Key missing. You need to add FIRECRAWL_API_KEY to use it. Get a key at [Firecrawl](https://firecrawl.dev/). If you don't add one, it will default to Jina."
        )
    if not jina_enabled and default_content_processing_engine_url in [
        "jina",
        "auto",
    ]:
        st.warning(
            "Jina API Key missing. It will work for a few requests a day, but fallback to simple afterwards. Please add JINA_API_KEY to prevent that. Get a key at [Jina.ai](https://jina.ai/)."
        )
    with st.expander("Help me choose"):
        st.markdown(
            "- Firecrawl is a paid service (with a free tier), and very powerful.\n- Jina is a good option as well and also has a free tier.\n- Simple will use basic HTTP extraction and will miss content on javascript-based websites.\n- Auto (recommended) will try to use firecrawl (if API Key is present). Then, it will use Jina until reaches the limit (or will keep using Jina if you setup the API Key). It will fallback to simple, when none of the previous options is possible."
        )
with st.container(border=True):
    st.markdown("**Content Embedding for Vector Search**")
    default_embedding_option = st.selectbox(
        "Default Embedding Option for Vector Search",
        ["ask", "always", "never"],
        index=(
            ["ask", "always", "never"].index(content_settings.default_embedding_option)
            if content_settings.default_embedding_option
            else 0
        ),
    )
    with st.expander("Help me choose"):
        st.markdown(
            "Embedding the content will make it easier to find by you and by your AI agents. If you are running a local embedding model (Ollama, for example), you shouldn't worry about cost and just embed everything. For online providers, you migtht want to be careful only if you process a lot of content (like 100s of documents at a day)."
        )
        st.markdown(
            "\n\n- Choose **always** if you are running a local embedding model or if your content volume is not that big\n- Choose **ask** if you want to decide every time\n- Choose **never** if you don't care about vector search or do not have an embedding provider."
        )
        st.markdown(
            "As a reference, OpenAI's text-embedding-3-small costs about 0.02 for 1 million tokens -- which is about 30 times the [Wikipedia page for Earth](https://en.wikipedia.org/wiki/Earth). With Gemini API, Text Embedding 004 is free with a rate limit of 1500 requests per minute."
        )
with st.container(border=True):
    st.markdown("**Auto Delete Uploaded Files**")
    # Persisted as "yes"/"no"; default to "yes" (index 0) when unset.
    auto_delete_files = st.selectbox(
        "Auto Delete Uploaded Files",
        ["yes", "no"],
        index=(
            ["yes", "no"].index(content_settings.auto_delete_files)
            if content_settings.auto_delete_files
            else 0
        ),
    )
    with st.expander("Help me choose"):
        st.markdown(
            "Once your files are uploaded and processed, they are not required anymore. Most users should allow Open Notebook to delete uploaded files from the upload folder automatically. Choose **no**, ONLY if you are using Notebook as the primary storage location for those files (which you shouldn't be at all). This option will soon be deprecated in favor of always downloading the files."
        )
        # Bug fix: the bullets here previously repeated the embedding help
        # text ("ask"/"never"), which are not options for this yes/no setting.
        st.markdown(
            "\n\n- Choose **yes** to let Open Notebook delete uploaded files automatically once they are processed (recommended)\n- Choose **no** only if you rely on the upload folder as the permanent storage location for those files."
        )
with st.container(border=True):
    st.markdown("**YouTube Preferred Languages**")
    st.caption(
        "Languages to prioritize when downloading YouTube transcripts (in order of preference). If the video does not include these languages, we'll get the best transcript possible. Don't worry, the language model will still be able to understand it. "
    )
    # Available language options with descriptions (code -> display name).
    language_options = {
        "af": "Afrikaans",
        "ak": "Akan",
        "sq": "Albanian",
        "am": "Amharic",
        "ar": "Arabic",
        "hy": "Armenian",
        "as": "Assamese",
        "ay": "Aymara",
        "az": "Azerbaijani",
        "bn": "Bangla",
        "eu": "Basque",
        "be": "Belarusian",
        "bho": "Bhojpuri",
        "bs": "Bosnian",
        "bg": "Bulgarian",
        "my": "Burmese",
        "ca": "Catalan",
        "ceb": "Cebuano",
        "zh": "Chinese",
        "zh-HK": "Chinese (Hong Kong)",
        "zh-CN": "Chinese (China)",
        "zh-SG": "Chinese (Singapore)",
        "zh-TW": "Chinese (Taiwan)",
        "zh-Hans": "Chinese (Simplified)",
        "zh-Hant": "Chinese (Traditional)",
        "hak-TW": "Hakka Chinese (Taiwan)",
        "nan-TW": "Min Nan Chinese (Taiwan)",
        "co": "Corsican",
        "hr": "Croatian",
        "cs": "Czech",
        "da": "Danish",
        "dv": "Divehi",
        "nl": "Dutch",
        "en": "English",
        "en-US": "English (United States)",
        "eo": "Esperanto",
        "et": "Estonian",
        "ee": "Ewe",
        "fil": "Filipino",
        "fi": "Finnish",
        "fr": "French",
        "gl": "Galician",
        "lg": "Ganda",
        "ka": "Georgian",
        "de": "German",
        "el": "Greek",
        "gn": "Guarani",
        "gu": "Gujarati",
        "ht": "Haitian Creole",
        "ha": "Hausa",
        "haw": "Hawaiian",
        "iw": "Hebrew",
        "hi": "Hindi",
        "hmn": "Hmong",
        "hu": "Hungarian",
        "is": "Icelandic",
        "ig": "Igbo",
        "id": "Indonesian",
        "ga": "Irish",
        "it": "Italian",
        "ja": "Japanese",
        "jv": "Javanese",
        "kn": "Kannada",
        "kk": "Kazakh",
        "km": "Khmer",
        "rw": "Kinyarwanda",
        "ko": "Korean",
        "kri": "Krio",
        "ku": "Kurdish",
        "ky": "Kyrgyz",
        "lo": "Lao",
        "la": "Latin",
        "lv": "Latvian",
        "ln": "Lingala",
        "lt": "Lithuanian",
        "lb": "Luxembourgish",
        "mk": "Macedonian",
        "mg": "Malagasy",
        "ms": "Malay",
        "ml": "Malayalam",
        "mt": "Maltese",
        "mi": "Māori",
        "mr": "Marathi",
        "mn": "Mongolian",
        "ne": "Nepali",
        "nso": "Northern Sotho",
        "no": "Norwegian",
        "ny": "Nyanja",
        "or": "Odia",
        "om": "Oromo",
        "ps": "Pashto",
        "fa": "Persian",
        "pl": "Polish",
        "pt": "Portuguese",
        "pa": "Punjabi",
        "qu": "Quechua",
        "ro": "Romanian",
        "ru": "Russian",
        "sm": "Samoan",
        "sa": "Sanskrit",
        "gd": "Scottish Gaelic",
        "sr": "Serbian",
        "sn": "Shona",
        "sd": "Sindhi",
        "si": "Sinhala",
        "sk": "Slovak",
        "sl": "Slovenian",
        "so": "Somali",
        "st": "Southern Sotho",
        "es": "Spanish",
        "su": "Sundanese",
        "sw": "Swahili",
        "sv": "Swedish",
        "tg": "Tajik",
        "ta": "Tamil",
        "tt": "Tatar",
        "te": "Telugu",
        "th": "Thai",
        "ti": "Tigrinya",
        "ts": "Tsonga",
        "tr": "Turkish",
        "tk": "Turkmen",
        "uk": "Ukrainian",
        "ur": "Urdu",
        "ug": "Uyghur",
        "uz": "Uzbek",
        "vi": "Vietnamese",
        "cy": "Welsh",
        "fy": "Western Frisian",
        "xh": "Xhosa",
        "yi": "Yiddish",
        "yo": "Yoruba",
        "zu": "Zulu",
        "en-GB": "English (UK)",
    }
    # Get current preferred languages or use defaults.
    # Bug fix: "de" was listed twice in the fallback defaults; st.multiselect
    # defaults must be unique, so the duplicate was removed.
    current_languages = content_settings.youtube_preferred_languages or [
        "en",
        "pt",
        "es",
        "de",
        "nl",
        "en-GB",
        "fr",
        "hi",
        "ja",
    ]
    youtube_preferred_languages = st.multiselect(
        "Select preferred languages (in order of preference)",
        options=list(language_options.keys()),
        default=current_languages,
        format_func=lambda x: f"{language_options[x]} ({x})",
        help="YouTube transcripts will be downloaded in the first available language from this list",
    )
    with st.expander("Help me choose"):
        st.markdown(
            "When processing YouTube videos, Open Notebook will try to download transcripts in your preferred languages. "
            "The order matters - it will try the first language first, then the second if the first isn't available, and so on. "
            "If none of your preferred languages are available, it will fall back to any available transcript."
        )
        st.markdown(
            "**Tip**: Put your most preferred language first. For example, if you speak both English and Spanish, "
            "but prefer English content, put 'en' before 'es' in your selection."
        )
# Persist all widget values back through the settings service.
if st.button("Save", key="save_settings"):
    # Type checking for literal assignments: guard each value so the cast to
    # the Literal type is only performed on a known-valid member.
    if default_content_processing_engine_doc in ("auto", "docling", "simple"):
        content_settings.default_content_processing_engine_doc = cast(
            Literal["auto", "docling", "simple"], default_content_processing_engine_doc
        )
    if default_content_processing_engine_url in ("auto", "firecrawl", "jina", "simple"):
        content_settings.default_content_processing_engine_url = cast(
            Literal["auto", "firecrawl", "jina", "simple"], default_content_processing_engine_url
        )
    if default_embedding_option in ("ask", "always", "never"):
        content_settings.default_embedding_option = cast(Literal["ask", "always", "never"], default_embedding_option)
    if auto_delete_files in ("yes", "no"):
        content_settings.auto_delete_files = cast(Literal["yes", "no"], auto_delete_files)
    content_settings.youtube_preferred_languages = youtube_preferred_languages
    settings_service.update_settings(content_settings)
    st.toast("Settings saved successfully!")

View File

@ -1,262 +0,0 @@
# Advanced page: start an embeddings rebuild, poll its status via the API,
# and show background information about rebuilds.
import time
import streamlit as st
from loguru import logger
from api.client import api_client
from pages.stream_app.utils import setup_page
setup_page("🔧 Advanced")
st.header("🔧 Advanced")
# =============================================================================
# Rebuild Embeddings Section
# =============================================================================
with st.container(border=True):
    st.markdown("### 🔄 Rebuild Embeddings")
    st.caption(
        "Rebuild vector embeddings for your content. Use this when switching embedding models "
        "or fixing corrupted embeddings."
    )
    col1, col2 = st.columns(2)
    with col1:
        rebuild_mode = st.selectbox(
            "Rebuild Mode",
            ["existing", "all"],
            index=0,
            help="Choose which items to rebuild",
        )
        with st.expander("Help me choose"):
            st.markdown("""
            **Existing**: Re-embed only items that already have embeddings
            - Use this when switching embedding models
            - Faster, processes only embedded content
            - Maintains your current embedded item list
            **All**: Re-embed existing items + create embeddings for items without any
            - Use this to embed everything in your database
            - Slower, processes all content
            - Finds and embeds previously un-embedded items
            """)
    with col2:
        st.markdown("**Include in Rebuild:**")
        include_sources = st.checkbox("Sources", value=True, help="Include source documents")
        include_notes = st.checkbox("Notes", value=True, help="Include notes")
        include_insights = st.checkbox("Insights", value=True, help="Include source insights")
    # Check if at least one type is selected; the button below is disabled
    # under the same condition.
    if not (include_sources or include_notes or include_insights):
        st.warning("⚠️ Please select at least one item type to rebuild")
    # Rebuild button: kicks off the rebuild and stashes the command id in
    # session state so the status section can poll it on subsequent reruns.
    if st.button("🚀 Start Rebuild", type="primary", disabled=not (include_sources or include_notes or include_insights)):
        with st.spinner("Starting rebuild..."):
            try:
                result = api_client.rebuild_embeddings(
                    mode=rebuild_mode,
                    include_sources=include_sources,
                    include_notes=include_notes,
                    include_insights=include_insights
                )
                if isinstance(result, dict):
                    command_id = result.get("command_id")
                    estimated_items = result.get("estimated_items", 0)
                else:
                    raise ValueError("Invalid result from rebuild_embeddings")
                # Store command ID in session state for status tracking
                st.session_state["rebuild_command_id"] = command_id
                st.session_state["rebuild_start_time"] = time.time()
                st.success(f"✅ Rebuild started! Processing approximately {estimated_items} items.")
                st.info(f"Command ID: `{command_id}`")
                st.rerun()
            except Exception as e:
                logger.error(f"Failed to start rebuild: {e}")
                st.error(f"❌ Failed to start rebuild: {str(e)}")
# =============================================================================
# Rebuild Status Section
# =============================================================================
# Show status if a rebuild is in progress (command id persisted above).
if "rebuild_command_id" in st.session_state:
    command_id = st.session_state["rebuild_command_id"]
    with st.container(border=True):
        st.markdown("### 📊 Rebuild Status")
        # Create placeholders for dynamic updates so content can be swapped
        # in place across reruns.
        status_placeholder = st.empty()
        progress_placeholder = st.empty()
        stats_placeholder = st.empty()
        try:
            status_result = api_client.get_rebuild_status(command_id)
            if isinstance(status_result, dict):
                status = status_result.get("status")
                progress = status_result.get("progress")
                stats = status_result.get("stats")
                error_message = status_result.get("error_message")
                started_at = status_result.get("started_at")
                completed_at = status_result.get("completed_at")
            else:
                status = None
                progress = None
                stats = None
                error_message = None
                started_at = None
                completed_at = None
            # Calculate elapsed time since the rebuild was started locally.
            if "rebuild_start_time" in st.session_state:
                elapsed = time.time() - st.session_state["rebuild_start_time"]
                elapsed_str = f"{int(elapsed // 60)}m {int(elapsed % 60)}s"
            else:
                elapsed_str = "Unknown"
            # Show status; terminal states clear the tracking session keys.
            if status == "queued":
                status_placeholder.info("⏳ **Status**: Queued (waiting to start)")
            elif status == "running":
                status_placeholder.info(f"⚙️ **Status**: Running... (Elapsed: {elapsed_str})")
                # Auto-refresh every 5 seconds if running
                time.sleep(5)
                st.rerun()
            elif status == "completed":
                status_placeholder.success("✅ **Status**: Completed!")
                # Clear session state
                if "rebuild_command_id" in st.session_state:
                    del st.session_state["rebuild_command_id"]
                if "rebuild_start_time" in st.session_state:
                    del st.session_state["rebuild_start_time"]
            elif status == "failed":
                status_placeholder.error("❌ **Status**: Failed")
                if error_message:
                    st.error(f"Error: {error_message}")
                # Clear session state
                if "rebuild_command_id" in st.session_state:
                    del st.session_state["rebuild_command_id"]
                if "rebuild_start_time" in st.session_state:
                    del st.session_state["rebuild_start_time"]
            # Show progress if available
            if progress:
                total = progress.get("total_items", 0)
                processed = progress.get("processed_items", 0)
                failed = progress.get("failed_items", 0)
                if total > 0:
                    progress_pct = (processed / total) * 100
                    progress_placeholder.progress(
                        progress_pct / 100,
                        text=f"Progress: {processed}/{total} items ({progress_pct:.1f}%)"
                    )
                if failed > 0:
                    st.warning(f"⚠️ {failed} items failed to process")
            # Show stats if available
            if stats:
                with stats_placeholder.container():
                    st.markdown("#### Processing Statistics")
                    col1, col2, col3, col4 = st.columns(4)
                    with col1:
                        st.metric("Sources", stats.get("sources_processed", 0))
                    with col2:
                        st.metric("Notes", stats.get("notes_processed", 0))
                    with col3:
                        st.metric("Insights", stats.get("insights_processed", 0))
                    with col4:
                        processing_time = stats.get("processing_time", 0)
                        st.metric("Time", f"{processing_time:.1f}s")
            # Show timestamps
            if started_at or completed_at:
                st.markdown("---")
                col1, col2 = st.columns(2)
                if started_at:
                    with col1:
                        st.caption(f"Started: {started_at}")
                if completed_at:
                    with col2:
                        st.caption(f"Completed: {completed_at}")
        except Exception as e:
            logger.error(f"Failed to get rebuild status: {e}")
            status_placeholder.error(f"❌ Failed to get status: {str(e)}")
            # Clear session state on error so the page stops polling.
            if "rebuild_command_id" in st.session_state:
                del st.session_state["rebuild_command_id"]
            if "rebuild_start_time" in st.session_state:
                del st.session_state["rebuild_start_time"]
# =============================================================================
# Additional Info Section
# =============================================================================
with st.container(border=True):
    st.markdown("### About Embedding Rebuilds")
    with st.expander("When should I rebuild embeddings?"):
        st.markdown("""
        **You should rebuild embeddings when:**
        1. **Switching embedding models**: If you change from one embedding model to another (e.g., from OpenAI to Google Gemini),
        you need to rebuild all embeddings to ensure consistency.
        2. **Upgrading embedding model versions**: When updating to a newer version of your embedding model,
        rebuild to take advantage of improvements.
        3. **Fixing corrupted embeddings**: If you suspect some embeddings are corrupted or missing,
        rebuilding can restore them.
        4. **After bulk imports**: If you imported content without embeddings, use "All" mode to embed everything.
        """)
    with st.expander("How long does rebuilding take?"):
        st.markdown("""
        **Processing time depends on:**
        - Number of items to process
        - Embedding model speed
        - API rate limits (for cloud providers)
        - System resources
        **Typical rates:**
        - **Local models** (Ollama): Very fast, limited only by hardware
        - **Cloud APIs** (OpenAI, Google): Moderate speed, may hit rate limits with large datasets
        - **Sources**: Slower than notes/insights (creates multiple chunks per source)
        **Example**: Rebuilding 200 items might take 2-5 minutes with cloud APIs, or under 1 minute with local models.
        """)
    with st.expander("Is it safe to rebuild while using the app?"):
        st.markdown("""
        **Yes, rebuilding is safe!** The rebuild process:
        **Is idempotent**: Running multiple times produces the same result
        **Doesn't delete content**: Only replaces embeddings
        **Can be run anytime**: No need to stop other operations
        **Handles errors gracefully**: Failed items are logged and skipped
        **However**: Very large rebuilds (1000s of items) may temporarily slow down searches while processing.
        """)

View File

@ -1,152 +0,0 @@
# Notebooks page: list, create, open, archive, and delete notebooks.
import streamlit as st
from humanize import naturaltime
from api.notebook_service import notebook_service
from api.notes_service import notes_service
from api.sources_service import sources_service
from open_notebook.domain.notebook import Notebook
from pages.stream_app.chat import chat_sidebar
from pages.stream_app.note import add_note, note_card
from pages.stream_app.source import add_source, source_card
from pages.stream_app.utils import setup_page, setup_stream_state
setup_page("📒 Open Notebook", only_check_mandatory_models=True)
def notebook_header(current_notebook: Notebook):
    """
    Defines the header of the notebook page, including the ability to edit
    the notebook's name and description, and to archive/unarchive or delete
    the notebook.
    """
    c1, c2, c3 = st.columns([8, 2, 2])
    c1.header(current_notebook.name)
    if c2.button("Back to the list", icon="🔙"):
        st.session_state["current_notebook_id"] = None
        st.rerun()
    if c3.button("Refresh", icon="🔄"):
        st.rerun()
    # Bug fix: the description may be None, which previously made len()
    # raise TypeError; normalize to an empty string first.
    current_description = current_notebook.description or ""
    with st.expander(
        current_description
        if len(current_description) > 0
        else "click to add a description"
    ):
        notebook_name = st.text_input("Name", value=current_notebook.name)
        notebook_description = st.text_area(
            "Description",
            value=current_description,
            placeholder="Add as much context as you can as this will be used by the AI to generate insights.",
        )
        c1, c2, c3 = st.columns([1, 1, 1])
        if c1.button("Save", icon="💾", key="edit_notebook"):
            current_notebook.name = notebook_name
            current_notebook.description = notebook_description
            notebook_service.update_notebook(current_notebook)
            st.rerun()
        # Archive/unarchive toggle: show whichever action applies.
        if not current_notebook.archived:
            if c2.button("Archive", icon="🗃️"):
                current_notebook.archived = True
                notebook_service.update_notebook(current_notebook)
                st.toast("Notebook archived", icon="🗃️")
        else:
            if c2.button("Unarchive", icon="🗃️"):
                current_notebook.archived = False
                notebook_service.update_notebook(current_notebook)
                st.toast("Notebook unarchived", icon="🗃️")
        if c3.button("Delete forever", type="primary", icon="☠️"):
            notebook_service.delete_notebook(current_notebook)
            st.session_state["current_notebook_id"] = None
            st.rerun()
def notebook_page(current_notebook: Notebook):
    """
    Render a single notebook: header, side-by-side source and note columns,
    and the chat sidebar bound to the active session.
    """
    # Guarantees that we have an entry for this notebook in the session state
    if current_notebook.id and current_notebook.id not in st.session_state:
        st.session_state[current_notebook.id] = {"notebook": current_notebook}  # type: ignore[index]
    # sets up the active session
    current_session = setup_stream_state(
        current_notebook=current_notebook,
    )
    sources = sources_service.get_all_sources(notebook_id=current_notebook.id)
    notes = notes_service.get_all_notes(notebook_id=current_notebook.id)
    notebook_header(current_notebook)
    work_tab, chat_tab = st.columns([4, 2])
    with work_tab:
        sources_tab, notes_tab = st.columns(2)
        with sources_tab:
            with st.container(border=True):
                # NOTE(review): icon="" looks like a stripped emoji — confirm
                # the intended icon value.
                if st.button("Add Source", icon=""):
                    add_source(current_notebook.id)
                for source in sources:
                    source_card(source=source, notebook_id=current_notebook.id)
        with notes_tab:
            with st.container(border=True):
                if st.button("Write a Note", icon="📝"):
                    add_note(current_notebook.id)
                for note in notes:
                    note_card(note=note, notebook_id=current_notebook.id)
    with chat_tab:
        chat_sidebar(current_notebook=current_notebook, current_session=current_session)
def notebook_list_item(notebook):
    """Render a single notebook summary card with an Open button."""
    with st.container(border=True):
        st.subheader(notebook.name)
        created_label = naturaltime(notebook.created)
        updated_label = naturaltime(notebook.updated)
        st.caption(f"Created: {created_label}, updated: {updated_label}")
        st.write(notebook.description)
        opened = st.button("Open", key=f"open_notebook_{notebook.id}")
        if opened:
            st.session_state["current_notebook_id"] = notebook.id
            st.rerun()
# Top-level page flow: show the selected notebook if one is open, otherwise
# the notebook list with a creation form and an archived section.
if "current_notebook_id" not in st.session_state:
    st.session_state["current_notebook_id"] = None
# todo: get the notebook, check if it exists and if it's archived
current_nb_id = st.session_state["current_notebook_id"]  # type: ignore[index]
if current_nb_id:
    current_notebook: Notebook | None = notebook_service.get_notebook(current_nb_id)
    if not current_notebook:
        st.error("Notebook not found")
        st.stop()
    # Type narrowing: st.stop() exits, so current_notebook is guaranteed to be Notebook here
    notebook_page(current_notebook)  # type: ignore[arg-type]
    st.stop()
st.title("📒 My Notebooks")
st.caption(
    "Notebooks are a great way to organize your thoughts, ideas, and sources. You can create notebooks for different research topics and projects, to create new articles, etc. "
)
with st.expander(" **New Notebook**"):
    new_notebook_title = st.text_input("New Notebook Name")
    new_notebook_description = st.text_area(
        "Description",
        placeholder="Explain the purpose of this notebook. The more details the better.",
    )
    # NOTE(review): icon="" looks like a stripped emoji — confirm intended icon.
    if st.button("Create a new Notebook", icon=""):
        notebook = notebook_service.create_notebook(
            name=new_notebook_title, description=new_notebook_description
        )
        st.toast("Notebook created successfully", icon="📒")
notebooks = notebook_service.get_all_notebooks(order_by="updated desc")
archived_notebooks = [nb for nb in notebooks if nb.archived]
# Active notebooks first; archived ones are grouped inside an expander.
for notebook in notebooks:
    if notebook.archived:
        continue
    notebook_list_item(notebook)
if len(archived_notebooks) > 0:
    with st.expander(f"**🗃️ {len(archived_notebooks)} archived Notebooks**"):
        st.write(" Archived Notebooks can still be accessed and used in search.")
        for notebook in archived_notebooks:
            notebook_list_item(notebook)

View File

@ -1,152 +0,0 @@
# Search page: "Ask your knowledge base" (LLM-backed) plus plain search.
import streamlit as st
from api.models_service import ModelsService
from api.notebook_service import notebook_service
from api.notes_service import notes_service
from api.search_service import search_service
from pages.components.model_selector import model_selector
from pages.stream_app.utils import convert_source_references, setup_page
# Initialize service instances
models_service = ModelsService()
setup_page("🔍 Search")
ask_tab, search_tab = st.tabs(["Ask Your Knowledge Base (beta)", "Search"])
# Results are kept in session state so they survive Streamlit reruns.
if "search_results" not in st.session_state:
    st.session_state["search_results"] = []
if "ask_results" not in st.session_state:
    st.session_state["ask_results"] = {}
def results_card(item):
    """Render one search hit: score, linked title, and optional match snippets."""
    with st.container(border=True):
        score = item["final_score"]
        title = item["title"]
        parent_id = item["parent_id"]
        st.markdown(f"[{score:.2f}] **[{title}](/?object_id={parent_id})**")
        if "matches" in item:
            with st.expander("Matches"):
                for snippet in item["matches"]:
                    st.markdown(snippet)
# "Ask" tab: route the question through strategy/answer/final-answer models
# and optionally save the result as a note.
with ask_tab:
    st.subheader("Ask Your Knowledge Base (beta)")
    st.caption(
        "The LLM will answer your query based on the documents in your knowledge base. "
    )
    question = st.text_input("Question", "")
    default_models = models_service.get_default_models()
    default_model = default_models.default_chat_model
    strategy_model = model_selector(
        "Query Strategy Model",
        "strategy_model",
        selected_id=default_model,
        model_type="language",
        help="This is the LLM that will be responsible for strategizing the search",
    )
    answer_model = model_selector(
        "Individual Answer Model",
        "answer_model",
        model_type="language",
        selected_id=default_model,
        help="This is the LLM that will be responsible for processing individual subqueries",
    )
    final_answer_model = model_selector(
        "Final Answer Model",
        "final_answer_model",
        model_type="language",
        selected_id=default_model,
        help="This is the LLM that will be responsible for processing the final answer",
    )
    # Vector retrieval requires an embedding model; without one the Ask
    # button is not rendered at all.
    embedding_model = default_models.default_embedding_model
    if not embedding_model:
        st.warning(
            "You can't use this feature because you have no embedding model selected. Please set one up in the Models page."
        )
    ask_bt = st.button("Ask") if embedding_model else None
    placeholder = st.container()
    if ask_bt:
        placeholder.write(f"Searching for {question}")
        st.session_state["ask_results"]["question"] = question
        st.session_state["ask_results"]["answer"] = None
        if not strategy_model.id or not answer_model.id or not final_answer_model.id:
            placeholder.error("One or more selected models has no ID")
        else:
            with st.spinner("Processing your question..."):
                try:
                    result = search_service.ask_knowledge_base(
                        question=question,
                        strategy_model=strategy_model.id,
                        answer_model=answer_model.id,
                        final_answer_model=final_answer_model.id,
                    )
                    if isinstance(result, dict) and result.get("answer"):
                        st.session_state["ask_results"]["answer"] = result["answer"]
                        with placeholder.container(border=True):
                            st.markdown(convert_source_references(result["answer"]))
                    else:
                        placeholder.error("No answer generated")
                except Exception as e:
                    placeholder.error(f"Error processing question: {str(e)}")
    # Offer to persist the last answer as a note in a chosen notebook.
    if st.session_state["ask_results"].get("answer"):
        with st.container(border=True):
            with st.form("save_note_form"):
                notebook = st.selectbox(
                    "Notebook",
                    notebook_service.get_all_notebooks(),
                    format_func=lambda x: x.name,
                )
                if st.form_submit_button("Save Answer as Note"):
                    notes_service.create_note(
                        title=st.session_state["ask_results"]["question"],
                        content=st.session_state["ask_results"]["answer"],
                        note_type="ai",
                        notebook_id=notebook.id,
                    )
                    st.success("Note saved successfully")
# Plain search tab: text or vector search over sources and notes.
with search_tab:
    with st.container(border=True):
        st.subheader("🔍 Search")
        st.caption("Search your knowledge base for specific keywords or concepts")
        search_term = st.text_input("Search", "")
        # Vector search needs an embedding model (resolved in the ask tab
        # above); fall back to text-only when none is configured.
        if not embedding_model:
            st.warning(
                "You can't use vector search because you have no embedding model selected. Only text search will be available."
            )
            search_type = "Text Search"
        else:
            search_type = st.radio("Search Type", ["Text Search", "Vector Search"])
        search_sources = st.checkbox("Search Sources", value=True)
        search_notes = st.checkbox("Search Notes", value=True)
        if st.button("Search"):
            st.write(f"Searching for {search_term}")
            search_type_api = "text" if search_type == "Text Search" else "vector"
            st.session_state["search_results"] = search_service.search(
                query=search_term,
                search_type=search_type_api,
                limit=100,
                search_sources=search_sources,
                search_notes=search_notes,
            )
    # Bug fix: copy each result dict before annotating it. The previous
    # shallow list copy still mutated the dicts stored in session state when
    # "final_score" was injected below.
    search_results = [dict(item) for item in st.session_state["search_results"]]
    for item in search_results:
        item["final_score"] = item.get(
            "relevance", item.get("similarity", item.get("score", 0))
        )
    # Sort search results by final_score in descending order
    search_results.sort(key=lambda x: x["final_score"], reverse=True)
    for item in search_results:
        results_card(item)

File diff suppressed because it is too large Load Diff

View File

@ -1,374 +0,0 @@
# Models page: shows provider availability and manages configured models.
import os
import nest_asyncio
# Patch the event loop before the streamlit/esperanto imports — presumably
# required for async clients created at import time; TODO confirm.
nest_asyncio.apply()
import streamlit as st
from esperanto import AIFactory
from api.models_service import models_service
from open_notebook.domain.models import Model
from pages.components.model_selector import model_selector
from pages.stream_app.utils import setup_page
setup_page(
    "🤖 Models",
    only_check_mandatory_models=False,
    stop_on_model_error=False,
    skip_model_check=True,
)
st.title("🤖 Models")
# Filled in by check_available_providers(): provider name -> bool.
provider_status = {}
# Model categories managed on this page.
model_types = [
    # "vision",
    "language",
    "embedding",
    "text_to_speech",
    "speech_to_text",
]
def check_available_providers():
    """
    Populate the module-level ``provider_status`` map from environment
    variables and return ``(available, unavailable)`` provider name lists.
    """
    def _env_set(*names):
        # A provider counts as configured only when every listed variable is set.
        return all(os.environ.get(name) is not None for name in names)

    provider_status["ollama"] = _env_set("OLLAMA_API_BASE")
    provider_status["openai"] = _env_set("OPENAI_API_KEY")
    provider_status["groq"] = _env_set("GROQ_API_KEY")
    provider_status["xai"] = _env_set("XAI_API_KEY")
    provider_status["vertex"] = _env_set(
        "VERTEX_PROJECT", "VERTEX_LOCATION", "GOOGLE_APPLICATION_CREDENTIALS"
    )
    # Google accepts either of two key variables.
    provider_status["google"] = _env_set("GOOGLE_API_KEY") or _env_set("GEMINI_API_KEY")
    provider_status["openrouter"] = _env_set("OPENROUTER_API_KEY")
    provider_status["anthropic"] = _env_set("ANTHROPIC_API_KEY")
    provider_status["elevenlabs"] = _env_set("ELEVENLABS_API_KEY")
    provider_status["voyage"] = _env_set("VOYAGE_API_KEY")
    provider_status["azure"] = _env_set(
        "AZURE_OPENAI_API_KEY",
        "AZURE_OPENAI_ENDPOINT",
        "AZURE_OPENAI_DEPLOYMENT_NAME",
        "AZURE_OPENAI_API_VERSION",
    )
    provider_status["mistral"] = _env_set("MISTRAL_API_KEY")
    provider_status["deepseek"] = _env_set("DEEPSEEK_API_KEY")
    provider_status["openai-compatible"] = _env_set("OPENAI_COMPATIBLE_BASE_URL")
    available = [name for name, configured in provider_status.items() if configured]
    unavailable = [name for name, configured in provider_status.items() if not configured]
    return available, unavailable
# Fetch current model configuration and the providers Esperanto supports.
default_models = models_service.get_default_models()
all_models = models_service.get_all_models()
esperanto_available_providers = AIFactory.get_available_providers()
st.subheader("Provider Availability")
st.markdown(
    "Below, you'll find all AI providers supported and their current availability status. To enable more providers, you need to setup some of their ENV Variables. Please check [the documentation](https://github.com/lfnovo/open-notebook/blob/main/docs/features/ai-models.md) for instructions on how to do so."
)
available_providers, unavailable_providers = check_available_providers()
with st.expander("Available Providers"):
    st.write(available_providers)
with st.expander("Unavailable Providers"):
    st.write(unavailable_providers)
st.divider()
# Helper function to add model with auto-save
def add_model_form(model_type, container_key, configured_providers):
    """
    Render an "Add New Model" form for one model type, offering only
    providers that Esperanto supports AND that have credentials configured.
    Creates the model through the models service on submit.
    """
    # Get providers that Esperanto supports for this model type
    esperanto_providers = esperanto_available_providers.get(model_type, [])
    # Filter to only show providers that have API keys configured
    available_providers = [p for p in esperanto_providers if p in configured_providers]
    # Sort providers alphabetically for easier navigation
    available_providers.sort()
    # Remove perplexity from available_providers if it exists
    # (deliberately excluded from the picker — reason not visible here).
    if "perplexity" in available_providers:
        available_providers.remove("perplexity")
    if not available_providers:
        st.info(f"No providers available for {model_type}")
        return
    st.markdown("**Add New Model**")
    with st.form(key=f"add_{model_type}_{container_key}"):
        provider = st.selectbox(
            "Provider",
            available_providers,
            key=f"provider_{model_type}_{container_key}",
        )
        model_name = st.text_input(
            "Model Name",
            key=f"name_{model_type}_{container_key}",
            help="gpt-5-mini, claude, gemini, llama3, etc. For azure, use the deployment_name as the model_name",
        )
        if st.form_submit_button("Add Model"):
            if model_name:
                models_service.create_model(
                    name=model_name, provider=provider, model_type=model_type
                )
                st.success("Model added!")
                st.rerun()
# Helper function to handle default model selection with auto-save
def handle_default_selection(
    label, key, current_value, help_text, model_type, caption=None
):
    """Render a default-model picker that persists changes immediately.

    Whenever the selection differs from the stored default, the new value is
    written through the models service and a toast is shown. Returns the
    currently selected model (may be None).
    """
    choice = model_selector(
        label,
        key,
        selected_id=current_value,
        help=help_text,
        model_type=model_type,
    )
    picked_different = choice and (not current_value or choice.id != current_value)
    cleared_existing = not choice and current_value
    if picked_different:
        setattr(default_models, key, choice.id)
        models_service.update_default_models(default_models)
        # Model defaults are automatically refreshed through the API service
        st.toast(f"Default {model_type} model set to {choice.name}")
    elif cleared_existing:
        setattr(default_models, key, None)
        models_service.update_default_models(default_models)
        # Model defaults are automatically refreshed through the API service
        st.toast(f"Default {model_type} model removed")
    if caption:
        st.caption(caption)
    return choice
# Bucket the configured models by capability so each page section below can
# render just its own list.
models_by_type: dict[str, list[Model]] = {
    kind: [m for m in all_models if m.type == kind]
    for kind in ("language", "embedding", "text_to_speech", "speech_to_text")
}
st.markdown("""
**Model Management Guide:** For optimal performance, refer to [Which model to choose?](https://github.com/lfnovo/open-notebook/blob/main/docs/features/ai-models.md)
You can test models in the [Transformations](Transformations) page.
""")
# Language Models Section: list configured models (with delete), add-model
# form, the four default-model assignments, and validation warnings.
st.subheader("🗣️ Language Models")
with st.container(border=True):
    col1, col2 = st.columns([2, 1])
    with col1:
        st.markdown("**Configured Models**")
        language_models = models_by_type["language"]
        if language_models:
            for model in language_models:
                subcol1, subcol2 = st.columns([4, 1])
                with subcol1:
                    st.markdown(f"{model.provider}/{model.name}")
                with subcol2:
                    # Trash button deletes the model and reruns the page.
                    if st.button(
                        "🗑️", key=f"delete_lang_{model.id}", help="Delete model"
                    ):
                        if model.id:
                            models_service.delete_model(model.id)
                            st.rerun()
        else:
            st.info("No language models configured")
    with col2:
        add_model_form("language", "main", available_providers)
    st.markdown("**Default Model Assignments**")
    col1, col2 = st.columns(2)
    with col1:
        handle_default_selection(
            "Chat Model",
            "default_chat_model",
            default_models.default_chat_model,
            "Used for chat conversations",
            "language",
            "Pick the one that vibes with you.",
        )
        handle_default_selection(
            "Tools Model",
            "default_tools_model",
            default_models.default_tools_model,
            "Used for calling tools - use OpenAI or Anthropic",
            "language",
            "Recommended: gpt-4o, claude, qwen3, etc.",
        )
    with col2:
        handle_default_selection(
            "Transformation Model",
            "default_transformation_model",
            default_models.default_transformation_model,
            "Used for summaries, insights, etc.",
            "language",
            "Can use cheaper models: gpt-5-mini, llama3, gemma3, etc.",
        )
        handle_default_selection(
            "Large Context Model",
            "large_context_model",
            default_models.large_context_model,
            "Used for large context processing",
            "language",
            "Recommended: Gemini models",
        )
    # Show warning if mandatory language models are missing
    if (
        not default_models.default_chat_model
        or not default_models.default_transformation_model
    ):
        st.warning(
            "⚠️ Please select a Chat Model and Transformation Model - these are required for Open Notebook to function properly."
        )
    elif not default_models.default_tools_model:
        st.info(
            "💡 Consider selecting a Tools Model for better tool calling capabilities (recommended: OpenAI or Anthropic models)."
        )
# Embedding Models Section: same layout as the language section, plus a
# standing warning because switching embedders invalidates stored vectors.
st.subheader("🔍 Embedding Models")
with st.container(border=True):
    col1, col2 = st.columns([2, 1])
    with col1:
        st.markdown("**Configured Models**")
        embedding_models = models_by_type["embedding"]
        if embedding_models:
            for model in embedding_models:
                subcol1, subcol2 = st.columns([4, 1])
                with subcol1:
                    st.markdown(f"{model.provider}/{model.name}")
                with subcol2:
                    if st.button(
                        "🗑️", key=f"delete_emb_{model.id}", help="Delete model"
                    ):
                        if model.id:
                            models_service.delete_model(model.id)
                            st.rerun()
        else:
            st.info("No embedding models configured")
        handle_default_selection(
            "Default Embedding Model",
            "default_embedding_model",
            default_models.default_embedding_model,
            "Used for semantic search and embeddings",
            "embedding",
        )
        st.warning("⚠️ Changing embedding models requires regenerating all embeddings")
        # Show warning if no default embedding model is selected
        if not default_models.default_embedding_model:
            st.warning(
                "⚠️ Please select a default Embedding Model - this is required for search functionality."
            )
    with col2:
        add_model_form("embedding", "main", available_providers)
# Text-to-Speech Models Section: configured models + default used for
# podcast/audio generation (overridable per podcast).
st.subheader("🎙️ Text-to-Speech Models")
with st.container(border=True):
    col1, col2 = st.columns([2, 1])
    with col1:
        st.markdown("**Configured Models**")
        tts_models = models_by_type["text_to_speech"]
        if tts_models:
            for model in tts_models:
                subcol1, subcol2 = st.columns([4, 1])
                with subcol1:
                    st.markdown(f"{model.provider}/{model.name}")
                with subcol2:
                    if st.button(
                        "🗑️", key=f"delete_tts_{model.id}", help="Delete model"
                    ):
                        if model.id:
                            models_service.delete_model(model.id)
                            st.rerun()
        else:
            st.info("No text-to-speech models configured")
        handle_default_selection(
            "Default TTS Model",
            "default_text_to_speech_model",
            default_models.default_text_to_speech_model,
            "Used for podcasts and audio generation",
            "text_to_speech",
            "Can be overridden per podcast",
        )
        # Show info if no default TTS model is selected
        if not default_models.default_text_to_speech_model:
            st.info(" Select a default TTS model to enable podcast generation.")
    with col2:
        add_model_form("text_to_speech", "main", available_providers)
# Speech-to-Text Models Section: configured models + default used for
# audio transcription.
st.subheader("🎤 Speech-to-Text Models")
with st.container(border=True):
    col1, col2 = st.columns([2, 1])
    with col1:
        st.markdown("**Configured Models**")
        stt_models = models_by_type["speech_to_text"]
        if stt_models:
            for model in stt_models:
                subcol1, subcol2 = st.columns([4, 1])
                with subcol1:
                    st.markdown(f"{model.provider}/{model.name}")
                with subcol2:
                    if st.button(
                        "🗑️", key=f"delete_stt_{model.id}", help="Delete model"
                    ):
                        if model.id:
                            models_service.delete_model(model.id)
                            st.rerun()
        else:
            st.info("No speech-to-text models configured")
        handle_default_selection(
            "Default STT Model",
            "default_speech_to_text_model",
            default_models.default_speech_to_text_model,
            "Used for audio transcriptions",
            "speech_to_text",
        )
        # Show info if no default STT model is selected
        if not default_models.default_speech_to_text_model:
            st.info(
                " Select a default STT model to enable audio transcription features."
            )
    with col2:
        add_model_form("speech_to_text", "main", available_providers)

View File

@ -1,152 +0,0 @@
import streamlit as st

from api.transformations_service import transformations_service
from open_notebook.domain.transformation import DefaultPrompts, Transformation
from pages.components.model_selector import model_selector
from pages.stream_app.utils import setup_page

setup_page("🧩 Transformations")

transformations_tab, playground_tab = st.tabs(["🧩 Transformations", "🛝 Playground"])

# Cache the transformation list in session state across reruns. On later
# runs, re-wrap the cached objects in the concrete Transformation type.
if "transformations" not in st.session_state:
    st.session_state.transformations = transformations_service.get_all_transformations()
else:
    # work-around for streamlit losing typing on session state
    st.session_state.transformations = [
        Transformation(**trans.model_dump())
        for trans in st.session_state.transformations
    ]
# Transformations tab: default-prompt editor, creation button, and one
# editable expander per existing transformation.
with transformations_tab:
    st.header("🧩 Transformations")
    st.markdown(
        "Transformations are prompts that will be used by the LLM to process a source and extract insights, summaries, etc. "
    )
    default_prompts: DefaultPrompts = DefaultPrompts(transformation_instructions=None)
    with st.expander("**⚙️ Default Transformation Prompt**"):
        default_prompts.transformation_instructions = st.text_area(
            "Default Transformation Prompt",
            default_prompts.transformation_instructions,
            height=300,
        )
        st.caption("This will be added to all your transformation prompts.")
        if st.button("Save", key="save_default_prompt"):
            default_prompts.update()
            st.toast("Default prompt saved successfully!")
    if st.button("Create new Transformation", icon="", key="new_transformation"):
        new_transformation = transformations_service.create_transformation(
            name="New Transformation",
            title="New Transformation Title",
            description="New Transformation Description",
            prompt="New Transformation Prompt",
            apply_default=False,
        )
        # Insert at the front so the new item shows at the top of the list.
        st.session_state.transformations.insert(0, new_transformation)
        st.rerun()
    st.divider()
    st.markdown("Your Transformations")
    if len(st.session_state.transformations) == 0:
        st.markdown(
            "No transformation created yet. Click 'Create new transformation' to get started."
        )
    else:
        for idx, transformation in enumerate(st.session_state.transformations):
            transform_expander = f"**{transformation.name}**" + (
                " - default" if transformation.apply_default else ""
            )
            with st.expander(
                transform_expander,
                expanded=(transformation.id is None),
            ):
                name = st.text_input(
                    "Transformation Name",
                    transformation.name,
                    key=f"{transformation.id}_name",
                )
                title = st.text_input(
                    "Card Title (this will be the title of all cards created by this transformation). ie 'Key Topics'",
                    transformation.title,
                    key=f"{transformation.id}_title",
                )
                description = st.text_area(
                    "Description (displayed as a hint in the UI so you know what you are selecting)",
                    transformation.description,
                    key=f"{transformation.id}_description",
                )
                prompt = st.text_area(
                    "Prompt",
                    transformation.prompt,
                    key=f"{transformation.id}_prompt",
                    height=300,
                )
                st.markdown(
                    "You can use the prompt to summarize, expand, extract insights and much more. Example: `Translate this text to French`. For inspiration, check out this [great resource](https://github.com/danielmiessler/fabric/tree/main/patterns)."
                )
                apply_default = st.checkbox(
                    "Suggest by default on new sources",
                    transformation.apply_default,
                    key=f"{transformation.id}_apply_default",
                )
                if st.button("Save", key=f"{transformation.id}_save"):
                    # Copy the edited widget values back onto the object,
                    # persist via the API, then rerun to refresh the list.
                    transformation.name = name
                    transformation.title = title
                    transformation.description = description
                    transformation.prompt = prompt
                    transformation.apply_default = apply_default
                    st.toast(f"Transformation '{name}' saved successfully!")
                    transformations_service.update_transformation(transformation)
                    st.rerun()
                if transformation.id:
                    with st.popover("Other actions"):
                        # NOTE(review): this button only calls st.stop() — it
                        # looks like an unfinished "send to playground"
                        # action; confirm intended behavior.
                        if st.button(
                            "Use in Playground",
                            icon="🛝",
                            key=f"{transformation.id}_playground",
                        ):
                            st.stop()
                        if st.button(
                            "Delete", icon="", key=f"{transformation.id}_delete"
                        ):
                            transformations_service.delete_transformation(transformation.id)
                            st.session_state.transformations.remove(transformation)
                            st.toast(f"Transformation '{name}' deleted successfully!")
                            st.rerun()
# Playground tab: run any transformation against ad-hoc text with a chosen
# language model.
with playground_tab:
    st.title("🛝 Playground")
    transformation = st.selectbox(
        "Pick a transformation",
        st.session_state.transformations,
        format_func=lambda x: x.name,
    )
    model = model_selector(
        "Pick a pattern model",
        key="model",
        help="This is the model that will be used to run the transformation",
        model_type="language",
    )
    input_text = st.text_area("Enter some text", height=200)
    if st.button("Run"):
        if transformation and model and input_text:
            if not model.id:
                st.error("Selected model has no ID")
            else:
                result = transformations_service.execute_transformation(
                    transformation_id=transformation.id,
                    input_text=input_text,
                    model_id=model.id
                )
                # The service is expected to return {"output": ...}; anything
                # else is silently ignored here.
                if isinstance(result, dict):
                    st.markdown(result.get("output", ""))
        else:
            st.warning("Please select a transformation, model, and enter some text.")

View File

View File

@ -1,9 +0,0 @@
"""Re-export the shared UI panels used by multiple Streamlit pages."""

from pages.components.note_panel import note_panel
from pages.components.source_insight import source_insight_panel
from pages.components.source_panel import source_panel

__all__ = [
    "note_panel",
    "source_insight_panel",
    "source_panel",
]

View File

@ -1,39 +0,0 @@
from typing import Literal
import streamlit as st
from api.models_service import ModelsService
from open_notebook.domain.models import Model
# Initialize service instance
models_service = ModelsService()


def model_selector(
    label,
    key,
    selected_id=None,
    help=None,
    model_type: Literal[
        "language", "embedding", "speech_to_text", "text_to_speech"
    ] = "language",
) -> Model:
    """Render a selectbox over all configured models of *model_type*.

    Args:
        label: Widget label shown to the user.
        key: Streamlit widget key (must be unique per page).
        selected_id: Id of the model to pre-select; falls back to the first
            entry when absent or not found.
        help: Optional tooltip text for the widget.
        model_type: Which model capability to list.

    Returns:
        The selected Model. NOTE(review): st.selectbox returns None when the
        model list is empty, despite the declared return type — confirm
        callers handle that.
    """
    models = models_service.get_all_models(model_type=model_type)
    # Stable, predictable ordering for the dropdown.
    models.sort(key=lambda m: (m.provider, m.name))
    # next() with a default never raises here, so the old broad
    # try/except Exception wrapper was dead weight and has been removed.
    index = (
        next((i for i, m in enumerate(models) if m.id == selected_id), 0)
        if selected_id
        else 0
    )
    return st.selectbox(
        label,
        models,
        format_func=lambda x: f"{x.provider} - {x.name}",
        help=help,
        index=index,
        key=key,
    )

View File

@ -1,49 +0,0 @@
import streamlit as st
from loguru import logger
from streamlit_monaco import st_monaco # type: ignore
from api.models_service import ModelsService
from api.notes_service import NotesService
from pages.stream_app.utils import convert_source_references
# Initialize service instances
models_service = ModelsService()
notes_service = NotesService()


def note_panel(note_id, notebook_id=None):
    """Render preview/edit tabs for a single note.

    Args:
        note_id: Id of the note to display.
        notebook_id: Notebook to attach a newly created note to (only used
            when saving a note that does not yet have an id).

    Raises:
        ValueError: If the note cannot be loaded.
    """
    default_models = models_service.get_default_models()
    if not default_models.default_embedding_model:
        st.warning(
            "Since there is no embedding model selected, your note will be saved but not searchable."
        )
    note = notes_service.get_note(note_id)
    if not note:
        # Bug fix: error message previously read "Note not fonud".
        raise ValueError(f"Note not found {note_id}")
    t_preview, t_edit = st.tabs(["Preview", "Edit"])
    with t_preview:
        st.subheader(note.title)
        st.markdown(convert_source_references(note.content))
    with t_edit:
        note.title = st.text_input("Title", value=note.title)
        note.content = st_monaco(
            value=note.content, height="300px", language="markdown"
        )
        b1, b2 = st.columns(2)
        if b1.button("Save", key=f"pn_edit_note_{note.id or 'new'}"):
            logger.debug("Editing note")
            if note.id:
                notes_service.update_note(note)
            else:
                # No id yet: create a fresh note attached to the notebook.
                note = notes_service.create_note(
                    content=note.content or "",
                    title=note.title,
                    note_type=note.note_type or "human",  # type: ignore[arg-type]
                    notebook_id=notebook_id,
                )
            st.rerun()
        if b2.button("Delete", type="primary", key=f"delete_note_{note.id or 'new'}"):
            logger.debug("Deleting note")
            if note.id:
                notes_service.delete_note(note.id)
            st.rerun()

View File

@ -1,27 +0,0 @@
import asyncio
import nest_asyncio
import streamlit as st
nest_asyncio.apply()
from api.insights_service import insights_service
from open_notebook.domain.notebook import SourceInsight
def source_insight_panel(source, notebook_id=None):
    """Render a single source insight with a link back to its source.

    Args:
        source: Id of the insight to display.
        notebook_id: Unused; kept for interface parity with sibling panels.

    Raises:
        ValueError: If the insight cannot be loaded.
    """
    si: SourceInsight = insights_service.get_insight(source)
    if not si:
        raise ValueError(f"insight not found {source}")
    st.subheader(si.insight_type)
    with st.container(border=True):
        # Get source information by querying the database relationship
        source_obj = asyncio.run(si.get_source())
        url = f"Navigator?object_id={source_obj.id}"
        st.markdown("**Original Source**")
        # Idiom fix: was a mixed f-string/%-format ("[link](%s)" % url);
        # a single f-string yields the exact same markdown.
        st.markdown(f"{source_obj.title} [link]({url})")
        st.markdown(si.content)
    if st.button("Delete", type="primary", key=f"delete_insight_{si.id or 'new'}"):
        insights_service.delete_insight(si.id or "")
        st.rerun()

View File

@ -1,115 +0,0 @@
import streamlit as st
from humanize import naturaltime
from api.insights_service import insights_service
from api.models_service import ModelsService
from api.sources_service import SourcesService
from api.transformations_service import TransformationsService
from pages.stream_app.utils import check_models
# Initialize service instances
sources_service = SourcesService()
transformations_service = TransformationsService()
models_service = ModelsService()
def source_panel(source_id: str, notebook_id=None, modal=False):
    """Render the full source view: title editor, insights list with run
    controls, embedding and deletion actions, plus the raw-content tab.

    Args:
        source_id: Id of the source to display.
        notebook_id: When set, enables "Save as Note" into that notebook.
        modal: True when rendered inside a dialog; scopes reruns to the
            fragment instead of the whole app.

    Raises:
        ValueError: If the source cannot be loaded.
    """
    check_models(only_mandatory=False)
    source_with_metadata = sources_service.get_source(source_id)
    if not source_with_metadata:
        raise ValueError(f"Source not found: {source_id}")
    # Now we can access both the source and embedded_chunks directly
    current_title = source_with_metadata.title if source_with_metadata.title else "No Title"
    source_with_metadata.title = st.text_input("Title", value=current_title)
    # Persist immediately when the user edits the title field.
    if source_with_metadata.title != current_title:
        sources_service.update_source(source_with_metadata.source)
        st.toast("Saved new Title")
    process_tab, source_tab = st.tabs(["Process", "Source"])
    with process_tab:
        c1, c2 = st.columns([4, 2])
        with c1:
            title = st.empty()
            if source_with_metadata.title:
                title.subheader(source_with_metadata.title)
            if source_with_metadata.asset and source_with_metadata.asset.url:
                from_src = f"from URL: {source_with_metadata.asset.url}"
            elif source_with_metadata.asset and source_with_metadata.asset.file_path:
                from_src = f"from file: {source_with_metadata.asset.file_path}"
            else:
                from_src = "from text"
            st.caption(f"Created {naturaltime(source_with_metadata.created)}, {from_src}")
            # One expander per existing insight, with delete / save-as-note.
            for insight in insights_service.get_source_insights(source_with_metadata.id):
                with st.expander(f"**{insight.insight_type}**"):
                    st.markdown(insight.content)
                    x1, x2 = st.columns(2)
                    if x1.button(
                        "Delete", type="primary", key=f"delete_insight_{insight.id}"
                    ):
                        insights_service.delete_insight(insight.id or "")
                        st.rerun(scope="fragment" if modal else "app")
                        # NOTE(review): st.rerun() interrupts the script, so
                        # this toast appears unreachable — confirm placement.
                        st.toast("Insight deleted")
                    if notebook_id:
                        if x2.button(
                            "Save as Note", icon="📝", key=f"save_note_{insight.id}"
                        ):
                            insights_service.save_insight_as_note(insight.id or "", notebook_id)
                            st.toast("Saved as Note. Refresh the Notebook to see it.")
        with c2:
            transformations = transformations_service.get_all_transformations()
            if transformations:
                with st.container(border=True):
                    transformation = st.selectbox(
                        "Run a transformation",
                        transformations,
                        key=f"transformation_{source_with_metadata.id}",
                        format_func=lambda x: x.name,
                    )
                    st.caption(transformation.description if transformation else "")
                    if st.button("Run"):
                        insights_service.create_source_insight(
                            source_id=source_with_metadata.id,
                            transformation_id=transformation.id or ""
                        )
                        st.rerun(scope="fragment" if modal else "app")
            else:
                st.markdown(
                    "No transformations created yet. Create new Transformation to use this feature."
                )
            default_models = models_service.get_default_models()
            embedding_model = default_models.default_embedding_model
            # NOTE: `help` shadows the builtin here; kept as-is.
            if not embedding_model:
                help = (
                    "No embedding model found. Please, select one on the Models page."
                )
            else:
                help = "This will generate your embedding vectors on the database for powerful search capabilities"
            # Offer embedding only while the source has no embedded chunks.
            if not source_with_metadata.embedded_chunks and st.button(
                "Embed vectors",
                icon="🦾",
                help=help,
                disabled=not embedding_model,
            ):
                from api.embedding_service import embedding_service
                result = embedding_service.embed_content(source_with_metadata.id, "source")
                result_dict = result if isinstance(result, dict) else result[0] if isinstance(result, list) else {}
                st.success(result_dict.get("message", "Embedding complete"))
            with st.container(border=True):
                st.caption(
                    "Deleting the source will also delete all its insights and embeddings"
                )
                if st.button(
                    "Delete", type="primary", key=f"bt_delete_source_{source_with_metadata.id}"
                ):
                    sources_service.delete_source(source_with_metadata.id)
                    st.rerun()
    with source_tab:
        st.subheader("Content")
        st.markdown(source_with_metadata.full_text)

View File

@ -1,3 +0,0 @@
# Load environment variables from a local .env file at import time so every
# page that imports this module sees the same configuration.
from dotenv import load_dotenv

load_dotenv()

View File

@ -1,53 +0,0 @@
import os
import streamlit as st
def check_password():
    """
    Check if the user has entered the correct password.

    Returns True if authenticated or no password is set; otherwise renders a
    login form and stops the current script run.
    """
    # Local import keeps this auth helper self-contained.
    import secrets

    # Get the password from environment variable
    app_password = os.environ.get("OPEN_NOTEBOOK_PASSWORD")
    # If no password is set, skip authentication
    if not app_password:
        return True
    # Check if already authenticated in this session
    if "authenticated" in st.session_state and st.session_state.authenticated:
        return True
    # Show login form
    with st.container():
        st.markdown("### 🔒 Authentication Required")
        st.markdown("This Open Notebook instance is password protected.")
        with st.form("login_form"):
            password = st.text_input(
                "Password",
                type="password",
                placeholder="Enter password"
            )
            submitted = st.form_submit_button("Login")
            if submitted:
                # Security fix: constant-time comparison avoids leaking the
                # password through response-timing differences. Compare as
                # bytes so non-ASCII passwords are supported.
                if secrets.compare_digest(
                    password.encode("utf-8"), app_password.encode("utf-8")
                ):
                    st.session_state.authenticated = True
                    st.success("Successfully authenticated!")
                    st.rerun()
                else:
                    st.error("Incorrect password. Please try again.")
    # Stop execution if not authenticated
    if "authenticated" not in st.session_state or not st.session_state.authenticated:
        st.stop()
    return True
def logout():
    """Clear authentication from session state."""
    # pop() with a default is a no-op when the key is absent, so no
    # membership check is needed.
    st.session_state.pop("authenticated", None)

View File

@ -1,257 +0,0 @@
import asyncio
import humanize
import streamlit as st
from loguru import logger
from api.chat_service import chat_service
from api.episode_profiles_service import episode_profiles_service
from api.podcast_service import PodcastService
# from open_notebook.plugins.podcasts import PodcastConfig
from open_notebook.utils import parse_thinking_content, token_count
from pages.stream_app.utils import (
convert_source_references,
create_session_for_notebook,
)
from .note import make_note_from_chat
# todo: build a smarter, more robust context manager function
async def build_context(notebook_id):
    """Build the chat context for a notebook from the user's per-item choices.

    Reads the notebook's context_config from session state (keys look like
    "source:<id>" / "note:<id>"), translates it into the API payload shape,
    fetches the context, and caches it back into session state.

    Returns:
        The context dict returned by the chat service.
    """
    # Convert context_config format for API
    context_config: dict[str, dict[str, str]] = {"sources": {}, "notes": {}}
    # Renamed loop variable: the original shadowed the builtin `id`.
    for item_key, status in st.session_state[notebook_id]["context_config"].items():
        if not item_key or ":" not in item_key:
            continue
        # Split only on the first colon so ids that themselves contain ":"
        # survive intact (the old plain split(":") raised on such keys).
        item_type, item_id = item_key.split(":", 1)
        if item_type == "source":
            context_config["sources"][item_id] = status
        elif item_type == "note":
            context_config["notes"][item_id] = status
    # Get context via API
    result = await chat_service.build_context(
        notebook_id=notebook_id, context_config=context_config
    )
    # Store in session state for compatibility
    st.session_state[notebook_id]["context"] = result["context"]
    return st.session_state[notebook_id]["context"]
async def execute_chat(txt_input, context, current_session):
    """Send one chat message via the API and mirror the resulting message
    list into session state. Returns the API response."""
    session_id = current_session["id"]
    response = await chat_service.execute_chat(
        session_id=session_id,
        message=txt_input,
        context=context,
    )
    # The API response is the authoritative message list for this session.
    st.session_state[session_id]["messages"] = response["messages"]
    return response
def chat_sidebar(current_notebook, current_session):
    """Render the notebook sidebar: context summary, podcast-generation tab,
    and the chat tab (session management + message history + input).

    Args:
        current_notebook: The notebook whose context/sessions are shown.
        current_session: Dict for the active chat session (id/title/updated).
    """
    # Build (and cache) the context for this notebook up front; the token
    # count below covers context plus the current message history.
    context = asyncio.run(build_context(notebook_id=current_notebook.id))
    tokens = token_count(
        str(context) + str(st.session_state[current_session["id"]]["messages"])
    )
    chat_tab, podcast_tab = st.tabs(["Chat", "Podcast"])
    with st.expander(f"Context ({tokens} tokens), {len(str(context))} chars"):
        st.json(context)
    with podcast_tab:
        with st.container(border=True):
            # Fetch available episode profiles
            try:
                episode_profiles = episode_profiles_service.get_all_episode_profiles()
                episode_profile_names = [ep.name for ep in episode_profiles]
            except Exception as e:
                st.error(f"Failed to load episode profiles: {str(e)}")
                episode_profiles = []
                episode_profile_names = []
            if len(episode_profiles) == 0:
                st.warning(
                    "No episode profiles found. Please create profiles in the Podcast Profiles tab first."
                )
                st.page_link("pages/5_🎙_Podcasts.py", label="🎙️ Go to Podcast Profiles")
            else:
                # Episode Profile selection
                selected_episode_profile = st.selectbox(
                    "Episode Profile", episode_profile_names
                )
                # Get the selected episode profile object to access speaker_config
                selected_profile_obj = next(
                    (
                        ep
                        for ep in episode_profiles
                        if ep.name == selected_episode_profile
                    ),
                    None,
                )
                # Episode details
                episode_name = st.text_input(
                    "Episode Name", placeholder="e.g., AI and the Future of Work"
                )
                instructions = st.text_area(
                    "Additional Instructions (Optional)",
                    placeholder="Any specific instructions beyond the episode profile's default briefing...",
                    help="These instructions will be added to the episode profile's default briefing.",
                )
                # Check for context availability
                if len(context.get("note", [])) + len(context.get("source", [])) == 0:
                    st.warning(
                        "No notes or sources found in context. You don't want a boring podcast, right? So, add some context first."
                    )
                else:
                    # Generate button
                    if st.button("🎙️ Generate Podcast", type="primary"):
                        if not episode_name.strip():
                            st.error("Please enter an episode name")
                        else:
                            try:
                                with st.spinner("Starting podcast generation..."):
                                    # Use podcast service to generate podcast
                                    async def generate_podcast():
                                        return await PodcastService.submit_generation_job(
                                            episode_profile_name=selected_episode_profile,
                                            speaker_profile_name=selected_profile_obj.speaker_config
                                            if selected_profile_obj
                                            else "",
                                            episode_name=episode_name.strip(),
                                            content=str(context),
                                            briefing_suffix=instructions.strip()
                                            if instructions.strip()
                                            else None,
                                            notebook_id=str(current_notebook.id),
                                        )
                                    job_id = asyncio.run(generate_podcast())
                                if job_id:
                                    st.info(
                                        "🎉 Podcast generation started successfully! Check the **Podcasts** page to monitor progress and download results."
                                    )
                                else:
                                    st.error(
                                        "Failed to start podcast generation: No job ID returned"
                                    )
                            except Exception as e:
                                logger.error(f"Error generating podcast: {str(e)}")
                                st.error(f"Error generating podcast: {str(e)}")
            # Navigation link
            st.divider()
            st.page_link("pages/5_🎙_Podcasts.py", label="🎙️ Go to Podcasts")
    with chat_tab:
        # Session management: rename/delete current, create new, load others.
        with st.expander(
            f"**Session:** {current_session['title']} - {humanize.naturaltime(current_session['updated'])}"
        ):
            new_session_name = st.text_input(
                "Current Session",
                key="new_session_name",
                value=current_session["title"],
            )
            c1, c2 = st.columns(2)
            if c1.button("Rename", key="rename_session"):
                asyncio.run(chat_service.update_session(current_session["id"], new_session_name))
                st.rerun()
            if c2.button("Delete", key="delete_session_1"):
                asyncio.run(chat_service.delete_session(current_session["id"]))
                st.session_state[current_notebook.id]["active_session"] = None
                st.rerun()
            st.divider()
            new_session_name = st.text_input(
                "New Session Name",
                key="new_session_name_f",
                placeholder="Enter a name for the new session...",
            )
            st.caption("If no name provided, we'll use the current date.")
            if st.button("Create New Session", key="create_new_session"):
                new_session = create_session_for_notebook(
                    notebook_id=current_notebook.id, session_name=new_session_name
                )
                st.session_state[current_notebook.id]["active_session"] = new_session["id"]
                st.rerun()
            st.divider()
            sessions = asyncio.run(chat_service.get_sessions(current_notebook.id))
            if len(sessions) > 1:
                st.markdown("**Other Sessions:**")
                for session in sessions:
                    if session["id"] == current_session["id"]:
                        continue
                    st.markdown(
                        f"{session['title']} - {humanize.naturaltime(session['updated'])}"
                    )
                    if st.button(label="Load", key=f"load_session_{session['id']}"):
                        st.session_state[current_notebook.id]["active_session"] = (
                            session["id"]
                        )
                        st.rerun()
        with st.container(border=True):
            request = st.chat_input("Enter your question")
            # removing for now since it's not multi-model capable right now
            if request:
                response = asyncio.run(execute_chat(
                    txt_input=request,
                    context=context,
                    current_session=current_session,
                ))
                st.session_state[current_session["id"]]["messages"] = response["messages"]
            # Render messages newest-first; only human/ai messages are shown.
            for msg in st.session_state[current_session["id"]]["messages"][::-1]:
                # Handle both domain objects and dict responses from API
                msg_type = msg.get("type") if isinstance(msg, dict) else msg.type
                msg_content = msg.get("content") if isinstance(msg, dict) else msg.content
                msg_id = msg.get("id") if isinstance(msg, dict) else getattr(msg, 'id', 'unknown')
                if msg_type not in ["human", "ai"]:
                    continue
                if not msg_content:
                    continue
                with st.chat_message(name=msg_type):
                    if msg_type == "ai":
                        # Parse thinking content for AI messages
                        thinking_content, cleaned_content = parse_thinking_content(
                            msg_content
                        )
                        # Show thinking content in expander if present
                        if thinking_content:
                            with st.expander("🤔 AI Reasoning", expanded=False):
                                st.markdown(thinking_content)
                        # Show the cleaned regular content
                        if cleaned_content:
                            st.markdown(convert_source_references(cleaned_content))
                        elif (
                            msg_content
                        ):  # Fallback to original if cleaning resulted in empty content
                            st.markdown(convert_source_references(msg_content))
                        # New Note button for AI messages
                        if st.button("💾 New Note", key=f"render_save_{msg_id}"):
                            make_note_from_chat(
                                content=msg_content,
                                notebook_id=current_notebook.id,
                            )
                            st.rerun()
                    else:
                        # Human messages - display normally
                        st.markdown(convert_source_references(msg_content))

View File

@ -1,10 +0,0 @@
# Selectbox options controlling how much of a source feeds the chat context.
source_context_icons = [
    "⛔ not in context",
    "🟡 insights",
    "🟢 full content",
]

# Notes are all-or-nothing: either excluded or fully included in context.
note_context_icons = [
    "⛔ not in context",
    "🟢 full content",
]

View File

@ -1,87 +0,0 @@
from typing import Optional
import streamlit as st
from humanize import naturaltime
from api.models_service import models_service
from api.notes_service import notes_service
from open_notebook.domain.notebook import Note
from pages.components import note_panel
from .consts import note_context_icons
@st.dialog("Write a Note", width="large")
def add_note(notebook_id):
default_models = models_service.get_default_models()
if not default_models.default_embedding_model:
st.warning(
"Since there is no embedding model selected, your note will be saved but not searchable."
)
note_title = st.text_input("Title")
note_content = st.text_area("Content")
if st.button("Save", key="add_note"):
notes_service.create_note(
content=note_content,
title=note_title,
note_type="human",
notebook_id=notebook_id
)
st.rerun()
@st.dialog("Add a Note", width="large")
def note_panel_dialog(note: Optional[Note] = None, notebook_id=None):
if not note:
raise ValueError("Note is required")
note_panel(note_id=note.id, notebook_id=notebook_id)
def make_note_from_chat(content, notebook_id=None):
    """Persist a chat reply as an AI note; the API generates the title."""
    notes_service.create_note(
        title=None,  # let the API auto-generate a title for AI notes
        content=content,
        note_type="ai",
        notebook_id=notebook_id,
    )
    st.rerun()
def note_card(note, notebook_id):
    """Render a compact note card with a context-inclusion selector, and
    record the chosen state in the notebook's context_config."""
    icon = "🤵" if note.note_type == "human" else "🤖"
    with st.container(border=True):
        st.markdown(f"{icon} **{note.title if note.title else 'No Title'}**")
        context_state = st.selectbox(
            "Context",
            label_visibility="collapsed",
            options=note_context_icons,
            index=1,
            key=f"note_{note.id}",
        )
        st.caption(f"Updated: {naturaltime(note.updated)}")
        if st.button("Expand", icon="📝", key=f"edit_note_{note.id}"):
            note_panel_dialog(notebook_id=notebook_id, note=note)
    st.session_state[notebook_id]["context_config"][note.id] = context_state
def note_list_item(note_id, score=None):
    """Render a search-result row for a note inside an expander.

    Args:
        note_id: Id of the note to load and display.
        score: Optional relevance score; omitted from the header when None.
    """
    note = notes_service.get_note(note_id)
    icon = "🤵" if note.note_type == "human" else "🤖"
    # Bug fix: formatting the default score=None with ':.2f' raised
    # TypeError; only include the score segment when one was provided.
    score_part = f"[{score:.2f}] " if score is not None else ""
    updated = naturaltime(note.updated) if note.updated else "N/A"
    with st.expander(f"{icon} {score_part}**{note.title}** {updated}"):
        st.write(note.content)
        if st.button("Edit Note", icon="📝", key=f"x_edit_note_{note.id}"):
            note_panel_dialog(note=note)

View File

@ -1,193 +0,0 @@
import asyncio
import os
from pathlib import Path
from typing import Any
import streamlit as st
from humanize import naturaltime
from loguru import logger
from api.insights_service import insights_service
from api.models_service import models_service
from api.settings_service import settings_service
from api.sources_service import sources_service
from api.transformations_service import transformations_service
from open_notebook.config import UPLOADS_FOLDER
from open_notebook.exceptions import UnsupportedTypeException
from pages.components import source_panel
from pages.stream_app.consts import source_context_icons
@st.dialog("Source", width="large")
def source_panel_dialog(source_id, notebook_id=None):
    """Show the source editor panel inside a modal dialog."""
    source_panel(source_id, modal=True, notebook_id=notebook_id)
@st.dialog("Add a Source", width="large")
def add_source(notebook_id):
    """Dialog for adding a source (link, file upload or raw text) to a notebook.

    Collects the input, the transformations to apply and the embedding choice,
    then creates the source through the sources service and reruns the page.
    """
    default_models = models_service.get_default_models()
    if not default_models.default_speech_to_text_model:
        st.warning(
            "Since there is no speech to text model selected, you can't upload audio/video files."
        )
    source_link = None
    source_file = None
    source_text = None
    content_settings = settings_service.get_settings()
    source_type = st.radio("Type", ["Link", "Upload", "Text"])
    req: dict[str, Any] = {}  # accumulates per-type request parameters
    transformations = transformations_service.get_all_transformations()
    if source_type == "Link":
        source_link = st.text_input("Link")
        req["url"] = source_link
    elif source_type == "Upload":
        source_file = st.file_uploader("Upload")
        # Whether the uploaded file should be removed after processing.
        req["delete_source"] = content_settings.auto_delete_files == "yes"
    else:
        source_text = st.text_area("Text")
        req["content"] = source_text
    default_transformations = [t for t in transformations if t.apply_default]
    apply_transformations = st.multiselect(
        "Apply transformations",
        options=transformations,
        format_func=lambda t: t.name,
        default=default_transformations,
    )
    # Embedding choice: "ask" shows a checkbox, "always" forces it on,
    # anything else skips embedding (user can embed later from the source).
    if content_settings.default_embedding_option == "ask":
        run_embed = st.checkbox(
            "Embed content for vector search",
            help="Creates an embedded content for vector search. Costs a little money and takes a little bit more time. You can do this later if you prefer.",
        )
        if not run_embed:
            st.caption("You can always embed later by clicking on the source.")
    elif content_settings.default_embedding_option == "always":
        st.caption("Embedding content for vector search automatically")
        run_embed = True
    else:
        st.caption(
            "Not embedding content for vector search as per settings. You can always embed later by clicking on the source."
        )
        run_embed = False
    if st.button("Process", key="add_source"):
        logger.debug("Adding source")
        with st.status("Processing...", expanded=True):
            st.write("Processing document...")
            try:
                if source_type == "Upload" and source_file is not None:
                    st.write("Uploading..")
                    file_name = source_file.name
                    file_extension = Path(file_name).suffix
                    base_name = Path(file_name).stem
                    # Generate unique filename
                    new_path = os.path.join(UPLOADS_FOLDER, file_name)
                    counter = 0
                    while os.path.exists(new_path):
                        counter += 1
                        new_file_name = f"{base_name}_{counter}{file_extension}"
                        new_path = os.path.join(UPLOADS_FOLDER, new_file_name)
                    req["file_path"] = str(new_path)
                    # Save the file
                    with open(new_path, "wb") as f:
                        f.write(source_file.getbuffer())
                from api.sources_service import sources_service
                # Convert transformations to IDs
                transformation_ids = (
                    [t.id for t in apply_transformations if t.id is not None]
                    if apply_transformations
                    else []
                )
                # Determine source type and parameters
                if source_type == "Link":
                    sources_service.create_source(
                        notebook_id=notebook_id,
                        source_type="link",
                        url=source_link,
                        transformations=transformation_ids,
                        embed=run_embed,
                    )
                elif source_type == "Upload":
                    delete_source_val = req.get("delete_source", False)
                    sources_service.create_source(
                        notebook_id=notebook_id,
                        source_type="upload",
                        file_path=req["file_path"],
                        transformations=transformation_ids,
                        embed=run_embed,
                        delete_source=bool(delete_source_val) if not isinstance(delete_source_val, bool) else delete_source_val,
                    )
                else:  # Text
                    sources_service.create_source(
                        notebook_id=notebook_id,
                        source_type="text",
                        content=source_text,
                        transformations=transformation_ids,
                        embed=run_embed,
                    )
            except UnsupportedTypeException as e:
                st.warning(
                    "This type of content is not supported yet. If you think it should be, let us know on the project Issues's page"
                )
                st.error(e)
                st.link_button(
                    "Go to Github Issues",
                    url="https://www.github.com/lfnovo/open-notebook/issues",
                )
                st.stop()
            except Exception as e:
                st.exception(e)
                return
        st.rerun()
def source_card(source, notebook_id):
    """Render a compact card for *source* and record its context selection."""
    # todo: more descriptive icons
    icon = "🔗"
    with st.container(border=True):
        card_title = (source.title if source.title else "No Title").strip()
        st.markdown(f"{icon}**{card_title}**")
        context_state = st.selectbox(
            "Context",
            label_visibility="collapsed",
            options=source_context_icons,
            index=1,
            key=f"source_{source.id}",
        )
        insight_count = len(insights_service.get_source_insights(source.id))
        st.caption(
            f"Updated: {naturaltime(source.updated)}, **{insight_count}** insights"
        )
        if st.button("Expand", icon="📝", key=source.id):
            source_panel_dialog(source.id, notebook_id)
        # Remember the chosen context mode for this source on the notebook.
        st.session_state[notebook_id]["context_config"][source.id] = context_state
def source_list_item(source_id, score=None):
    """Render an expandable list row for a source, including its insights.

    Args:
        source_id: id of the source to fetch and display.
        score: optional relevance score; shown as ``[x.xx]`` when present.
    """
    source_with_metadata = sources_service.get_source(source_id)
    source = source_with_metadata.source
    if not source:
        st.error("Source not found")
        return
    icon = "🔗"
    # Bug fix: the default score=None used to hit the :.2f format
    # unconditionally and raise TypeError.
    score_label = f"[{score:.2f}] " if score is not None else ""
    updated_label = naturaltime(source.updated) if source.updated else "N/A"
    with st.expander(f"{icon} {score_label}**{source.title}** {updated_label}"):
        source_insights = asyncio.run(source.get_insights())
        for insight in source_insights:
            st.markdown(f"**{insight.insight_type}**")
            st.write(insight.content)
        if st.button("Edit source", icon="📝", key=f"x_edit_source_{source.id}"):
            source_panel_dialog(source_id=source.id)

View File

@ -1,237 +0,0 @@
import asyncio
import re
from datetime import datetime
from typing import Optional
import nest_asyncio
import streamlit as st
from loguru import logger
nest_asyncio.apply()
from api.chat_service import chat_service
from api.models_service import models_service
from open_notebook.utils import (
compare_versions,
get_installed_version,
get_version_from_github,
)
def version_sidebar():
    """Render version info in the sidebar and flag available upgrades.

    The GitHub version lookup is cached in session state. Bug fix: the
    original set ``version_check_failed`` on error but never populated
    ``latest_version``, so its cache condition re-fetched from GitHub on
    every rerun; failures are now cached and skip the network call.
    """
    with st.sidebar:
        # Get current version of the installed package.
        try:
            current_version = get_installed_version("open-notebook")
        except Exception:
            # Fallback to reading directly from pyproject.toml
            import tomli

            with open("pyproject.toml", "rb") as f:
                pyproject = tomli.load(f)
            current_version = pyproject["project"]["version"]
        st.write(f"Open Notebook: {current_version}")

        if st.session_state.get("version_check_failed"):
            # A previous lookup already failed this session; don't retry.
            st.caption("⚠️ Could not check for updates (offline or GitHub unavailable)")
            return

        try:
            # Only hit GitHub once per session.
            if "latest_version" not in st.session_state:
                st.session_state.latest_version = get_version_from_github(
                    "https://www.github.com/lfnovo/open-notebook", "main"
                )
            latest_version = st.session_state.latest_version
            if compare_versions(current_version, latest_version) < 0:
                st.warning(
                    f"New version {latest_version} available. [Click here for upgrade instructions](https://github.com/lfnovo/open-notebook/blob/main/docs/SETUP.md#upgrading-open-notebook)"
                )
        except Exception:
            # Cache the failure so subsequent reruns skip the network call.
            st.session_state.version_check_failed = True
            st.caption("⚠️ Could not check for updates (offline or GitHub unavailable)")
def create_session_for_notebook(notebook_id: str, session_name: Optional[str] = None):
    """Create a chat session for *notebook_id*, defaulting to a timestamped title."""
    if session_name:
        title = session_name
    else:
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        title = f"Chat Session {timestamp}"
    return asyncio.run(chat_service.create_session(notebook_id, title))
def setup_stream_state(current_notebook) -> dict:
    """
    Sets the value of the current session_id for API-based chat functionality.
    Creates or retrieves a chat session and sets up session state.

    Returns the chat session dict (must contain an "id" key).
    Raises ValueError when no usable session can be acquired.
    """
    assert current_notebook is not None and current_notebook.id, (
        "Current Notebook not selected properly"
    )
    if "context_config" not in st.session_state[current_notebook.id]:
        st.session_state[current_notebook.id]["context_config"] = {}
    current_session_id = st.session_state[current_notebook.id].get("active_session")
    # gets the chat session if provided
    chat_session = None
    if current_session_id:
        try:
            chat_session = asyncio.run(chat_service.get_session(current_session_id))
        except Exception as e:
            # Stale/invalid session id — fall through to create/fetch below.
            logger.warning(f"Could not retrieve session {current_session_id}: {e}")
    # if there is no chat session, create one or get the first one
    if not chat_session:
        sessions = asyncio.run(chat_service.get_sessions(current_notebook.id))
        if not sessions or len(sessions) == 0:
            logger.debug("Creating new chat session")
            chat_session = create_session_for_notebook(current_notebook.id)
        else:
            logger.debug("Getting last updated session")
            chat_session = sessions[0]
    if not chat_session or not chat_session.get("id"):
        raise ValueError("Problem acquiring chat session")
    # sets the active session for the notebook
    session_id = chat_session["id"]
    st.session_state[current_notebook.id]["active_session"] = session_id
    # Initialize session state for messages if not exists
    if session_id not in st.session_state:
        # Load the full session with messages from API
        try:
            full_session = asyncio.run(chat_service.get_session(session_id))
            messages = full_session.get("messages", [])
        except Exception as e:
            # Degrade gracefully: start with an empty message list.
            logger.warning(f"Could not load messages for session {session_id}: {e}")
            messages = []
        st.session_state[session_id] = {
            "messages": messages,
            "context": None,
            "notebook": None,
            "context_config": {}
        }
    return chat_session
def check_migration():
    """No-op kept for backward compatibility.

    Database migrations now run automatically when the API starts up
    (see the api/main.py lifespan handler). This only seeds the legacy
    session-state flag so older callers keep working.
    """
    flag = "migration_required"
    if flag not in st.session_state:
        st.session_state[flag] = False
def check_models(only_mandatory=True, stop_on_error=True):
    """Warn (and optionally halt the page) when default models are missing.

    Args:
        only_mandatory: when False, also warn about missing optional models.
        stop_on_error: when True, st.stop() if a mandatory model is missing.
    """
    defaults = models_service.get_default_models()
    mandatory = [
        defaults.default_chat_model,
        defaults.default_transformation_model,
        defaults.default_embedding_model,
    ]
    optional_extras = [
        defaults.default_speech_to_text_model,
        defaults.large_context_model,
    ]
    if not all(mandatory):
        st.error(
            "You are missing some default models and the app will not work as expected. Please, select them on the Models page."
        )
        if stop_on_error:
            st.stop()
    if not only_mandatory and not all(mandatory + optional_extras):
        st.warning(
            "You are missing some important optional models. The app might not work as expected. Please, select them on the Models page."
        )
def handle_error(func):
    """Decorator for consistent error handling.

    Logs any exception raised by *func* and surfaces it to the user via
    st.error; the wrapper then returns None. Bug fix: the original did not
    apply functools.wraps, so the decorated function lost its __name__ and
    __doc__ (which also made the f"Error in {func.__name__}" log line wrong
    for stacked decorators).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {str(e)}")
            logger.exception(e)
            st.error(f"An error occurred: {str(e)}")

    return wrapper
def setup_page(
    title: str,
    layout="wide",
    sidebar_state="expanded",
    only_check_mandatory_models=True,
    stop_on_model_error=True,
    skip_model_check=False,
):
    """Common page setup for all pages.

    Configures the Streamlit page, enforces authentication, runs the legacy
    migration check, optionally validates default models, and renders the
    version sidebar.
    """
    st.set_page_config(
        page_title=title,
        layout=layout,
        initial_sidebar_state=sidebar_state,
    )
    # Authentication gate must run before anything else is rendered.
    from pages.stream_app.auth import check_password

    check_password()
    check_migration()
    # Skip model check if requested (e.g. on the Models page itself).
    if not skip_model_check:
        check_models(
            only_mandatory=only_check_mandatory_models,
            stop_on_error=stop_on_model_error,
        )
    version_sidebar()
def convert_source_references(text):
    """
    Converts source references in brackets to markdown-style links.

    Matches patterns like [source_insight:id], [note:id], [source:id], or
    [source_embedding:id] and turns each into a markdown link pointing at
    the object viewer page.

    Args:
        text (str): The input text containing source references

    Returns:
        str: Text with source references converted to markdown links
    """
    # [type:id] where type is one of the four known reference kinds.
    ref_pattern = r"\[((?:source_insight|note|source|source_embedding):[\w\d]+)\]"
    return re.sub(
        ref_pattern,
        lambda m: f"[[{m.group(1)}]](/?object_id={m.group(1)})",
        text,
    )

View File

@ -1,6 +1,6 @@
[project]
name = "open-notebook"
version = "1.0.0"
version = "1.0.1"
description = "An open source implementation of a research assistant, inspired by Google Notebook LM"
authors = [
{name = "Luis Novo", email = "lfnovo@gmail.com"}
@ -13,18 +13,13 @@ classifiers = [
]
requires-python = ">=3.11,<3.13"
dependencies = [
"streamlit>=1.45.0",
"fastapi>=0.104.0",
"uvicorn>=0.24.0",
"pydantic>=2.9.2",
"loguru>=0.7.2",
"langchain>=0.3.3",
"langgraph>=0.2.38",
"humanize>=4.11.0",
"streamlit-tags>=1.2.8",
"streamlit-scrollable-textbox>=0.0.3",
"tiktoken>=0.8.0",
"streamlit-monaco>=0.1.3",
"langgraph-checkpoint-sqlite>=2.0.0",
"langchain-community>=0.3.3",
"langchain-openai>=0.2.3",
@ -69,8 +64,7 @@ build-backend = "setuptools.build_meta"
[dependency-groups]
dev = [
"pre-commit>=4.1.0",
"types-requests>=2.32.4.20250913",
"watchdog>=6.0.0",
"types-requests>=2.32.4.20250913"
]
[tool.isort]

192
uv.lock
View File

@ -105,22 +105,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f5/10/6c25ed6de94c49f88a91fa5018cb4c0f3625f31d5be9f771ebe5cc7cd506/aiosqlite-0.21.0-py3-none-any.whl", hash = "sha256:2549cf4057f95f53dcba16f2b64e8e2791d7e1adedb13197dd8ed77bb226d7d0", size = 15792, upload-time = "2025-02-03T07:30:13.6Z" },
]
[[package]]
name = "altair"
version = "5.5.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "jinja2" },
{ name = "jsonschema" },
{ name = "narwhals" },
{ name = "packaging" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/16/b1/f2969c7bdb8ad8bbdda031687defdce2c19afba2aa2c8e1d2a17f78376d8/altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d", size = 705305, upload-time = "2024-11-23T23:39:58.542Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c", size = 731200, upload-time = "2024-11-23T23:39:56.4Z" },
]
[[package]]
name = "annotated-types"
version = "0.7.0"
@ -224,15 +208,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/94/fe/3aed5d0be4d404d12d36ab97e2f1791424d9ca39c2f754a6285d59a3b01d/beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515", size = 106392, upload-time = "2025-09-29T10:05:43.771Z" },
]
[[package]]
name = "blinker"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" },
]
[[package]]
name = "bottleneck"
version = "1.6.0"
@ -808,30 +783,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" },
]
[[package]]
name = "gitdb"
version = "4.0.12"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "smmap" },
]
sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" },
]
[[package]]
name = "gitpython"
version = "3.1.45"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "gitdb" },
]
sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" },
]
[[package]]
name = "google-ai-generativelanguage"
version = "0.8.0"
@ -2135,15 +2086,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
]
[[package]]
name = "narwhals"
version = "2.8.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ae/05/79a5b5a795f36c1aaa002d194c1ef71e5d95f7e1900155bbfde734815ab9/narwhals-2.8.0.tar.gz", hash = "sha256:52e0b22d54718264ae703bd9293af53b04abc995a1414908c3b807ba8c913858", size = 574277, upload-time = "2025-10-13T08:44:28.81Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1d/86/ac808ecb94322a3f1ea31627d13ab3e50dd4333564d711e0e481ad0f4586/narwhals-2.8.0-py3-none-any.whl", hash = "sha256:6304856676ba4a79fd34148bda63aed8060dd6edb1227edf3659ce5e091de73c", size = 415852, upload-time = "2025-10-13T08:44:25.421Z" },
]
[[package]]
name = "nest-asyncio"
version = "1.6.0"
@ -2257,7 +2199,7 @@ wheels = [
[[package]]
name = "open-notebook"
version = "1.0.0"
version = "1.0.1"
source = { editable = "." }
dependencies = [
{ name = "ai-prompter" },
@ -2266,7 +2208,6 @@ dependencies = [
{ name = "fastapi" },
{ name = "groq" },
{ name = "httpx", extra = ["socks"] },
{ name = "humanize" },
{ name = "langchain" },
{ name = "langchain-anthropic" },
{ name = "langchain-community" },
@ -2284,10 +2225,6 @@ dependencies = [
{ name = "podcast-creator" },
{ name = "pydantic" },
{ name = "python-dotenv" },
{ name = "streamlit" },
{ name = "streamlit-monaco" },
{ name = "streamlit-scrollable-textbox" },
{ name = "streamlit-tags" },
{ name = "surreal-commands" },
{ name = "surrealdb" },
{ name = "tiktoken" },
@ -2309,7 +2246,6 @@ dev = [
dev = [
{ name = "pre-commit" },
{ name = "types-requests" },
{ name = "watchdog" },
]
[package.metadata]
@ -2320,7 +2256,6 @@ requires-dist = [
{ name = "fastapi", specifier = ">=0.104.0" },
{ name = "groq", specifier = ">=0.12.0" },
{ name = "httpx", extras = ["socks"], specifier = ">=0.27.0" },
{ name = "humanize", specifier = ">=4.11.0" },
{ name = "ipykernel", marker = "extra == 'dev'", specifier = ">=6.29.5" },
{ name = "ipywidgets", marker = "extra == 'dev'", specifier = ">=8.1.5" },
{ name = "langchain", specifier = ">=0.3.3" },
@ -2343,10 +2278,6 @@ requires-dist = [
{ name = "pydantic", specifier = ">=2.9.2" },
{ name = "python-dotenv", specifier = ">=1.0.1" },
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.5.5" },
{ name = "streamlit", specifier = ">=1.45.0" },
{ name = "streamlit-monaco", specifier = ">=0.1.3" },
{ name = "streamlit-scrollable-textbox", specifier = ">=0.0.3" },
{ name = "streamlit-tags", specifier = ">=1.2.8" },
{ name = "surreal-commands", specifier = ">=1.0.13" },
{ name = "surrealdb", specifier = ">=1.0.4" },
{ name = "tiktoken", specifier = ">=0.8.0" },
@ -2360,7 +2291,6 @@ provides-extras = ["dev"]
dev = [
{ name = "pre-commit", specifier = ">=4.1.0" },
{ name = "types-requests", specifier = ">=2.32.4.20250913" },
{ name = "watchdog", specifier = ">=6.0.0" },
]
[[package]]
@ -2964,19 +2894,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/83/d6/887a1ff844e64aa823fb4905978d882a633cfe295c32eacad582b78a7d8b/pydantic_settings-2.11.0-py3-none-any.whl", hash = "sha256:fe2cea3413b9530d10f3a5875adffb17ada5c1e1bab0b2885546d7310415207c", size = 48608, upload-time = "2025-09-24T14:19:10.015Z" },
]
[[package]]
name = "pydeck"
version = "0.9.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "jinja2" },
{ name = "numpy" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a1/ca/40e14e196864a0f61a92abb14d09b3d3da98f94ccb03b49cf51688140dab/pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605", size = 3832240, upload-time = "2024-05-10T15:36:21.153Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ab/4c/b888e6cf58bd9db9c93f40d1c6be8283ff49d88919231afe93a6bcf61626/pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038", size = 6900403, upload-time = "2024-05-10T15:36:17.36Z" },
]
[[package]]
name = "pydub"
version = "0.25.1"
@ -3437,15 +3354,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
]
[[package]]
name = "smmap"
version = "5.0.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" },
]
[[package]]
name = "sniffio"
version = "1.3.1"
@ -3553,71 +3461,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/be/72/2db2f49247d0a18b4f1bb9a5a39a0162869acf235f3a96418363947b3d46/starlette-0.48.0-py3-none-any.whl", hash = "sha256:0764ca97b097582558ecb498132ed0c7d942f233f365b86ba37770e026510659", size = 73736, upload-time = "2025-09-13T08:41:03.869Z" },
]
[[package]]
name = "streamlit"
version = "1.50.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "altair" },
{ name = "blinker" },
{ name = "cachetools" },
{ name = "click" },
{ name = "gitpython" },
{ name = "numpy" },
{ name = "packaging" },
{ name = "pandas" },
{ name = "pillow" },
{ name = "protobuf" },
{ name = "pyarrow" },
{ name = "pydeck" },
{ name = "requests" },
{ name = "tenacity" },
{ name = "toml" },
{ name = "tornado" },
{ name = "typing-extensions" },
{ name = "watchdog", marker = "sys_platform != 'darwin'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d6/f6/f7d3a0146577c1918439d3163707040f7111a7d2e7e2c73fa7adeb169c06/streamlit-1.50.0.tar.gz", hash = "sha256:87221d568aac585274a05ef18a378b03df332b93e08103fffcf3cd84d852af46", size = 9664808, upload-time = "2025-09-23T19:24:00.31Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/38/991bbf9fa3ed3d9c8e69265fc449bdaade8131c7f0f750dbd388c3c477dc/streamlit-1.50.0-py3-none-any.whl", hash = "sha256:9403b8f94c0a89f80cf679c2fcc803d9a6951e0fba542e7611995de3f67b4bb3", size = 10068477, upload-time = "2025-09-23T19:23:57.245Z" },
]
[[package]]
name = "streamlit-monaco"
version = "0.1.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "streamlit" },
]
sdist = { url = "https://files.pythonhosted.org/packages/dc/88/66d784c30a6c5a43a295ad855f3e5a70bab0c392c04a4ade2365aff7a25e/streamlit-monaco-0.1.3.tar.gz", hash = "sha256:cb0ea842f26a89c5987d7e962712603d3594082613387a91403520c79288c519", size = 164437, upload-time = "2023-09-26T07:36:09.997Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1a/e6/12e26a6e63a1d60c5e0d9a81e9521d10d58d97ad05afcd3b4cba33d8a7cb/streamlit_monaco-0.1.3-py3-none-any.whl", hash = "sha256:3e60cb853b0b15c59b372fbc10c0df7d5f2852d4ef259d8489a98341804b7eb0", size = 175338, upload-time = "2023-09-26T07:36:05.382Z" },
]
[[package]]
name = "streamlit-scrollable-textbox"
version = "0.0.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "streamlit" },
]
sdist = { url = "https://files.pythonhosted.org/packages/cd/c4/8dcbcbbd6109420333a06c124f771287d75ae6b328b7e3fb8d0216b32c07/streamlit_scrollable_textbox-0.0.3.tar.gz", hash = "sha256:f32c22fb28a16caa4f952f4ce6bc8ab8dc79c5adf36b188ac8938bc458905fbc", size = 497935, upload-time = "2023-02-15T01:25:00.376Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d8/4f/50ea64244fa602d514c47cb26778a80ec0cd4dec6240e7ca01d040556d1e/streamlit_scrollable_textbox-0.0.3-py3-none-any.whl", hash = "sha256:e48d57f5477491b58afde2cd6a81d6c98bdb8063570fc2467958ae286d883b5c", size = 971299, upload-time = "2023-02-15T01:24:58.681Z" },
]
[[package]]
name = "streamlit-tags"
version = "1.2.8"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "streamlit" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c8/c9/047d5e24f3ca5d31ed8c0ced4ed2e6ac20db5514c7fc814583b0c495e6ee/streamlit_tags-1.2.8.tar.gz", hash = "sha256:9ea46b21f206dc73164e59e3c800a96c863c90af57afde20115f001a6d986583", size = 561247, upload-time = "2021-07-16T17:41:57.503Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/19/56/75000c009c3cdbb960783580dd3b51e1483b3cfa8f2967669fccd4cff894/streamlit_tags-1.2.8-py3-none-any.whl", hash = "sha256:c71b10666f3fce67d8e0b3c089aa50dc48830d310223fb88005b08f157586f95", size = 2678564, upload-time = "2021-07-16T17:41:55.38Z" },
]
[[package]]
name = "surreal-commands"
version = "1.1.1"
@ -3714,15 +3557,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" },
]
[[package]]
name = "toml"
version = "0.10.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" },
]
[[package]]
name = "tomli"
version = "2.3.0"
@ -3903,30 +3737,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/27/73/d9a94da0e9d470a543c1b9d3ccbceb0f59455983088e727b8a1824ed90fb/virtualenv-20.35.3-py3-none-any.whl", hash = "sha256:63d106565078d8c8d0b206d48080f938a8b25361e19432d2c9db40d2899c810a", size = 5981061, upload-time = "2025-10-10T21:23:30.433Z" },
]
[[package]]
name = "watchdog"
version = "6.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393, upload-time = "2024-11-01T14:06:31.756Z" },
{ url = "https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392, upload-time = "2024-11-01T14:06:32.99Z" },
{ url = "https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019, upload-time = "2024-11-01T14:06:34.963Z" },
{ url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" },
{ url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" },
{ url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" },
{ url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" },
{ url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" },
{ url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" },
{ url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" },
{ url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" },
{ url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" },
{ url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" },
{ url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" },
{ url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" },
{ url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
]
[[package]]
name = "wcwidth"
version = "0.2.14"