Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
138 changes: 136 additions & 2 deletions src/vtk_prompt/controllers/conversation.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,11 +186,145 @@ def _process_conversation_pair(app: Any, pair_index: int | None = None) -> None:
app.state.query_text = query_text


def _process_loaded_conversation(app: Any) -> None:
def _process_loaded_conversation(
app: Any, conversation_object: dict[str, Any] | None = None
) -> None:
"""Process loaded conversation file."""
if not app.state.conversation:
# Use provided object or fall back to state
if conversation_object:
# Set state without triggering watcher infinite loop
app.state.conversation_object = conversation_object
app.state.conversation_file = conversation_object["name"]
elif not app.state.conversation:
return

# Build navigation pairs and process the latest one
build_conversation_navigation(app)
_process_conversation_pair(app)


def _process_multiple_conversations(app: Any, conversation_files: list[dict[str, Any]]) -> None:
    """Merge multiple JSON conversation files and load the result.

    Each valid file's message list is concatenated into a single merged
    conversation, which is then stored in app state and processed.

    Args:
        app: Application object holding server/UI state.
        conversation_files: Uploaded-file objects with ``name``, ``type``
            and ``content`` keys.
    """
    merged_conversation: list[Any] = []
    valid_files: list[str] = []

    for file_obj in conversation_files:
        try:
            # Validate file before processing: must be a JSON upload with content.
            if (
                file_obj.get("type") == "application/json"
                and Path(file_obj.get("name", "")).suffix == ".json"
                and file_obj.get("content")
            ):
                content = file_obj["content"]
                # Uploads may arrive as raw bytes; decode before parsing.
                if isinstance(content, bytes):
                    content = content.decode("utf-8")
                loaded_conversation = json.loads(content)
                # A conversation file must decode to a list of messages.
                # Extending with any other JSON type would corrupt the merge
                # (e.g. a top-level string would be spread char-by-char).
                if not isinstance(loaded_conversation, list):
                    logger.warning(
                        f"Conversation file is not a message list: {file_obj.get('name')}"
                    )
                    continue
                merged_conversation.extend(loaded_conversation)
                valid_files.append(file_obj["name"])
                logger.info(f"Loaded conversation file: {file_obj['name']}")
            else:
                logger.warning(f"Invalid conversation file: {file_obj.get('name')}")

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse JSON from {file_obj.get('name')}: {e}")
        except Exception as e:
            logger.error(f"Failed to process conversation file {file_obj.get('name')}: {e}")

    if merged_conversation and valid_files:
        # Set the merged conversation and keep the client in sync.
        app.state.conversation = merged_conversation
        app.state.conversation_file = ", ".join(valid_files)
        app.prompt_client.update_conversation(merged_conversation, app.state.conversation_file)

        # Process the merged conversation (builds navigation, shows latest pair).
        _process_loaded_conversation(app)

        logger.info(f"Successfully merged {len(valid_files)} conversation files")
    else:
        logger.warning("No valid conversation files to process")


def process_uploaded_files(app: Any, uploaded_files: list[dict[str, Any]]) -> None:
    """Route uploaded files to the right handler based on their extension.

    JSON uploads are treated as conversation files and merged together;
    YAML uploads are treated as prompt configurations (applied in order,
    so the last one wins). Anything else is logged and skipped.
    """
    if not uploaded_files:
        return

    try:
        conversation_files: list[dict[str, Any]] = []
        prompt_files: list[dict[str, Any]] = []

        # Bucket the uploads by (case-insensitive) extension.
        for upload in uploaded_files:
            lowered_name = upload.get("name", "").lower()
            if lowered_name.endswith(".json"):
                conversation_files.append(upload)
            elif lowered_name.endswith((".yaml", ".yml")):
                prompt_files.append(upload)
            else:
                logger.warning(f"Unsupported file type: {upload.get('name')}")

        # All conversation files are merged in one pass.
        if conversation_files:
            _process_multiple_conversations(app, conversation_files)

        # Prompt files are applied sequentially; the last one wins.
        for prompt_file in prompt_files:
            _process_loaded_prompt(app, prompt_file)

    except Exception as e:
        logger.error(f"Failed to process uploaded files: {e}")


def _process_loaded_prompt(app: Any, prompt_object: dict[str, Any] | None = None) -> None:
"""Process loaded prompt file using existing prompt loader functionality."""
# Use provided object or fall back to state
prompt_obj = prompt_object or app.state.prompt_object
if not prompt_obj:
return

try:
# Use the existing prompt loader functionality
import os
import tempfile

from ..utils import prompt_loader

# Use the parameter we already assigned above

# Get content and ensure it's a string
content = prompt_obj["content"]
if isinstance(content, bytes):
content = content.decode("utf-8")

# Write content to temp file for the loader
with tempfile.NamedTemporaryFile(mode="w", suffix=".yml", delete=False) as tmp:
tmp.write(content) # Now guaranteed to be a string
temp_path = tmp.name

try:
# Use existing custom prompt file logic
original_prompt_file = app.custom_prompt_file
app.custom_prompt_file = temp_path
app.custom_prompt_data = None # Reset

# Load using existing prompt loader
prompt_loader.load_custom_prompt_file(app)

app.state.prompt_file = prompt_obj["name"]
logger.info(f"Loaded custom prompt file: {prompt_obj['name']}")

# Force UI to recognize state changes by triggering model selection update
# This is safe because we're not in a watcher context here
if hasattr(app.state, "provider"):
# Trigger available models update by re-setting provider
current_provider = getattr(app.state, "provider", None)
if current_provider:
app.state.provider = current_provider

finally:
# Clean up temp file and restore original
os.unlink(temp_path)
app.custom_prompt_file = original_prompt_file

except Exception as e:
logger.error(f"Failed to load prompt file: {e}")
app.state.prompt_file = None
17 changes: 10 additions & 7 deletions src/vtk_prompt/rag_chat_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
from typing import Any

import click
import query_db
from llama_index.core.llms import ChatMessage
from llama_index.llms.openai import OpenAI

Expand All @@ -34,9 +33,6 @@

logger = get_logger(__name__)

# Add rag-components to path
sys.path.append(str(Path(__file__).resolve().parent.parent.parent / "rag-components"))


def check_rag_components_available() -> bool:
"""Check if RAG components are available and installed."""
Expand All @@ -46,9 +42,9 @@ def check_rag_components_available() -> bool:


def setup_rag_path() -> str:
    """Add rag-components src directory to the Python path.

    Returns:
        The absolute path that was appended to ``sys.path`` (appended at
        most once; repeat calls are idempotent).
    """
    # Repo root is three levels above this module (src/vtk_prompt/<file>).
    repo_root = Path(__file__).resolve().parent.parent.parent
    rag_path = str(repo_root / "rag-components" / "src")
    if rag_path not in sys.path:
        sys.path.append(rag_path)
    return rag_path
Expand All @@ -63,7 +59,8 @@ def get_rag_snippets(
"""Get code snippets from the RAG database."""
setup_rag_path()
try:
import query_db
# Lazy import after path setup
from rag_components import query_db # type: ignore[import-not-found]

client = query_db.initialize_db(database_path)
results = query_db.query_db(query, collection_name, top_k, client)
Expand Down Expand Up @@ -118,6 +115,10 @@ def _init_components(self) -> None:
except Exception as e:
raise RuntimeError(f"Unsupported Model {self.model}: {e}")

# Lazy import after path setup
setup_rag_path()
from rag_components import query_db # type: ignore[import-not-found]

self.client = query_db.initialize_db(database_path=self.database)
os.environ["TOKENIZERS_PARALLELISM"] = "false"

Expand All @@ -143,6 +144,8 @@ def ask(
self.history.append(ChatMessage(role="user", content=query))

# Query the RAG database for relevant documents
from rag_components import query_db # type: ignore[import-not-found]

results = query_db.query_db(query, collection_name, top_k, self.client)
relevant_examples = [item["original_id"] for item in results["code_metadata"]] + [
item["code"] for item in results["text_metadata"]
Expand Down
9 changes: 9 additions & 0 deletions src/vtk_prompt/state/initializer.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,11 @@ def initialize_state(app: Any) -> None:
app.state.error_message = ""
app.state.input_tokens = 0
app.state.output_tokens = 0
app.state.advanced_settings_open = False
app.state.active_settings_tab = "files"

# File upload state variables
app.state.uploaded_files = None

# Conversation state variables
app._conversation_loading = False
Expand All @@ -42,6 +47,10 @@ def initialize_state(app: Any) -> None:
app.state.can_navigate_right = False
app.state.is_viewing_history = False

# Prompt file state variables
app.state.prompt_object = None
app.state.prompt_file = None

# Toast notification state
app.state.toast_message = ""
app.state.toast_visible = False
Expand Down
4 changes: 2 additions & 2 deletions src/vtk_prompt/ui/layout/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,11 @@
"""

from .content import build_content
from .drawer import build_drawer
from .settings_dialog import build_settings_dialog
from .toolbar import build_toolbar

__all__ = [
"build_toolbar",
"build_drawer",
"build_content",
"build_settings_dialog",
]
Loading
Loading