Skip to content
8 changes: 5 additions & 3 deletions docs/BaseComponent_ko.md
Original file line number Diff line number Diff line change
Expand Up @@ -190,13 +190,15 @@ retriever = FunctionalComponent(my_retriever, name="MyRetriever", hook=hook)

```python
from lang2sql.core.hooks import MemoryHook
from lang2sql.flows.baseline import SequentialFlow

hook = MemoryHook()

flow = BaselineFlow(steps=[...], hook=hook) # 또는 컴포넌트마다 hook 주입
out = flow.run_query("지난달 매출")
flow = SequentialFlow(steps=[...], hook=hook) # 또는 컴포넌트마다 hook 주입
out = flow.run("지난달 매출")

# 이벤트 확인
for e in hook.events:
for e in hook.snapshot():
print(e.phase, e.component, e.duration_ms, e.error)
```

Expand Down
10 changes: 5 additions & 5 deletions docs/Hook_and_exception_ko.md
Original file line number Diff line number Diff line change
Expand Up @@ -111,16 +111,16 @@ class MemoryHook:

#### MemoryHook 사용 예시

```py
```python
from lang2sql.core.hooks import MemoryHook
from lang2sql.flows.baseline import BaselineFlow
from lang2sql.flows.baseline import SequentialFlow

hook = MemoryHook()
flow = BaselineFlow(steps=[...], hook=hook)
flow = SequentialFlow(steps=[...], hook=hook)

out = flow.run_query("지난달 매출")
out = flow.run("지난달 매출")

for e in hook.events:
for e in hook.snapshot():
print(e.name, e.phase, e.component, e.duration_ms, e.error)
```

Expand Down
53 changes: 43 additions & 10 deletions docs/tutorials/getting-started-without-datahub.md
Original file line number Diff line number Diff line change
Expand Up @@ -122,19 +122,53 @@ print(f"FAISS index saved to: {OUTPUT_DIR}/catalog.faiss")

### 4) 실행

v2 CLI는 외부 벡터 인덱스 경로를 인수로 받지 않습니다.
앞서 생성한 FAISS 인덱스를 활용하려면 Python API로 파이프라인을 직접 구성합니다.

```python
# run_query.py
import os
from dotenv import load_dotenv
from lang2sql import CatalogChunker, VectorRetriever
from lang2sql.integrations.db import SQLAlchemyDB
from lang2sql.integrations.embedding import OpenAIEmbedding
from lang2sql.integrations.llm import OpenAILLM
from lang2sql.integrations.vectorstore import FAISSVectorStore
from lang2sql.flows.hybrid import HybridNL2SQL

load_dotenv()

INDEX_DIR = "./dev/table_info_db"
embedding = OpenAIEmbedding(
model=os.getenv("OPEN_AI_EMBEDDING_MODEL", "text-embedding-3-large"),
api_key=os.getenv("OPEN_AI_KEY"),
)

# FAISS 인덱스 로드 후 파이프라인 구성
store = FAISSVectorStore.load(f"{INDEX_DIR}/catalog.faiss")

pipeline = HybridNL2SQL(
catalog=[], # FAISS에 이미 인덱싱돼 있으므로 빈 리스트
llm=OpenAILLM(model=os.getenv("OPEN_AI_LLM_MODEL", "gpt-4o"), api_key=os.getenv("OPEN_AI_KEY")),
db=SQLAlchemyDB(os.getenv("DB_URL", "sqlite:///sample.db")),
embedding=embedding,
db_dialect=os.getenv("DB_TYPE", "sqlite"),
)

rows = pipeline.run("주문 수를 집계하는 SQL을 만들어줘")
print(rows)
```

Streamlit UI:

```bash
# Streamlit UI
lang2sql run-streamlit
```

# CLI 예시 (FAISS 인덱스 사용)
lang2sql query "주문 수를 집계하는 SQL을 만들어줘" \
--vectordb-type faiss \
--vectordb-location ./dev/table_info_db
CLI (카탈로그 없이 baseline만 가능):

# CLI 예시 (pgvector)
lang2sql query "주문 수를 집계하는 SQL을 만들어줘" \
--vectordb-type pgvector \
--vectordb-location "postgresql://pgvector:pgvector@localhost:5432/postgres"
```bash
lang2sql query "주문 수를 집계해줘" --flow baseline --dialect sqlite
```

### 5) (선택) pgvector로 적재하기
Expand Down Expand Up @@ -229,4 +263,3 @@ VectorRetriever.from_chunks(
print(f"pgvector collection populated: {TABLE}")
```

주의: FAISS 디렉토리 또는 pgvector 컬렉션이 없으면 현재 코드는 DataHub에서 메타데이터를 가져와 인덱스를 생성하려고 시도합니다. DataHub를 사용하지 않는 경우 위 절차로 사전에 VectorDB를 만들어 두세요.
94 changes: 94 additions & 0 deletions docs/tutorials/v2-complete-tutorial.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
5-1. 샘플 문서 자동 생성
6. 가장 쉬운 로컬 스모크 테스트 (API 키 없이)
7. BaselineNL2SQL 기본 사용 (KeywordRetriever)
7-1. DB 탐색: SQLAlchemyExplorer
8. 실제 LLM 연결 (OpenAI / Anthropic)
9. VectorRetriever 기초 (빠른 시작)
10. 문서 파싱: MarkdownLoader / PlainTextLoader / DirectoryLoader / PDFLoader
Expand Down Expand Up @@ -232,6 +233,99 @@ print(rows)

---

## 7-1) DB 탐색: SQLAlchemyExplorer

LLM에게 넘길 스키마 정보가 필요하거나, 처음 보는 DB를 손으로 살펴볼 때 사용합니다.
카탈로그를 미리 구축하지 않아도 DDL + 샘플 데이터를 바로 꺼내볼 수 있습니다.

### 기본 사용

```python
from lang2sql import build_explorer_from_url

exp = build_explorer_from_url("sqlite:///sample.db")

# 1) 어떤 테이블이 있는지
print(exp.list_tables())
# ['customers', 'orders', ...]

# 2) 테이블 DDL — CREATE TABLE 원문
print(exp.get_ddl("orders"))
# CREATE TABLE orders (
# id INTEGER PRIMARY KEY,
# customer_id INTEGER NOT NULL REFERENCES customers(id),
# amount REAL,
# status TEXT DEFAULT 'pending'
# )

# 3) 실제 샘플 데이터 (기본 5행)
print(exp.sample_data("orders"))
# [{'id': 1, 'customer_id': 1, 'amount': 99.9, 'status': 'shipped'}, ...]

# 4) 커스텀 읽기 전용 질의
print(exp.execute_read_only("SELECT status, COUNT(*) AS cnt FROM orders GROUP BY status"))
# [{'status': 'pending', 'cnt': 3}, {'status': 'shipped', 'cnt': 2}]
```

### 전체 테이블 한 번에 둘러보기

```python
from lang2sql import build_explorer_from_url

exp = build_explorer_from_url("sqlite:///sample.db")

for table in exp.list_tables():
print(f"\n=== {table} ===")
print(exp.get_ddl(table))
rows = exp.sample_data(table, limit=2)
print("샘플:", rows)
```

### PostgreSQL / MySQL 연결

URL만 바꾸면 됩니다.

```python
from lang2sql import build_explorer_from_url

# PostgreSQL
exp = build_explorer_from_url("postgresql://user:password@localhost:5432/mydb")

# MySQL
exp = build_explorer_from_url("mysql+pymysql://user:password@localhost:3306/mydb")

# schema 지정 (schema 파라미터)
exp = build_explorer_from_url("postgresql://user:pass@host/db", schema="analytics")
print(exp.list_tables()) # analytics 스키마 테이블만
```

### 기존 SQLAlchemyDB engine 재사용

연결 풀을 따로 만들지 않고 공유할 수 있습니다.

```python
from lang2sql.integrations.db import SQLAlchemyDB, SQLAlchemyExplorer

db = SQLAlchemyDB("sqlite:///sample.db")
exp = SQLAlchemyExplorer.from_engine(db._engine)

# db는 SQL 실행, exp는 탐색 — 같은 연결 풀 공유
rows = db.execute("SELECT COUNT(*) AS cnt FROM orders")
ddl = exp.get_ddl("orders")
```

### 쓰기 구문은 거부됩니다

```python
exp.execute_read_only("DROP TABLE orders")
# ValueError: Write operations not allowed: 'DROP TABLE orders'

exp.execute_read_only("INSERT INTO orders VALUES (99, 1, 0, 'test')")
# ValueError: Write operations not allowed: 'INSERT INTO orders ...'
```

---

## 8) 실제 LLM 연결 (OpenAI / Anthropic)

LLM 백엔드는 교체 가능합니다.
Expand Down
40 changes: 36 additions & 4 deletions src/lang2sql/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
from .factory import build_db_from_env, build_embedding_from_env, build_llm_from_env
from .factory import (
build_db_from_env,
build_embedding_from_env,
build_explorer_from_url,
build_llm_from_env,
)
from .components.enrichment.context_enricher import ContextEnricher
from .components.enrichment.question_profiler import QuestionProfiler
from .components.execution.sql_executor import SQLExecutor
Expand Down Expand Up @@ -28,16 +33,18 @@
from .core.exceptions import ComponentError, IntegrationMissingError, Lang2SQLError
from .core.hooks import MemoryHook, NullHook, TraceHook
from .core.ports import (
CatalogLoaderPort,
DBExplorerPort,
DBPort,
DocumentLoaderPort,
EmbeddingPort,
LLMPort,
VectorStorePort,
)
from .integrations.db.sqlalchemy_ import SQLAlchemyExplorer
from .flows.enriched_nl2sql import EnrichedNL2SQL
from .flows.hybrid import HybridNL2SQL
from .flows.nl2sql import BaselineNL2SQL
from .integrations.catalog.datahub_ import DataHubCatalogLoader
from .integrations.embedding.azure_ import AzureOpenAIEmbedding
from .integrations.embedding.bedrock_ import BedrockEmbedding
from .integrations.embedding.gemini_ import GeminiEmbedding
Expand All @@ -48,8 +55,6 @@
from .integrations.llm.gemini_ import GeminiLLM
from .integrations.llm.huggingface_ import HuggingFaceLLM
from .integrations.llm.ollama_ import OllamaLLM
from .integrations.vectorstore.faiss_ import FAISSVectorStore
from .integrations.vectorstore.pgvector_ import PGVectorStore

__all__ = [
# Data types
Expand All @@ -64,9 +69,11 @@
# Ports (protocols)
"LLMPort",
"DBPort",
"DBExplorerPort",
"EmbeddingPort",
"VectorStorePort",
"DocumentLoaderPort",
"CatalogLoaderPort",
# Components — retrieval
"KeywordRetriever",
"VectorRetriever",
Expand Down Expand Up @@ -116,8 +123,33 @@
"OllamaEmbedding",
# Catalog integrations (Phase 3)
"DataHubCatalogLoader",
# DB Explorer (Phase A1)
"SQLAlchemyExplorer",
# Factory (Phase 6)
"build_llm_from_env",
"build_embedding_from_env",
"build_db_from_env",
"build_explorer_from_url",
]

# ---------------------------------------------------------------------------
# Lazy imports (PEP 562) — optional dependencies that have import side-effects
# (e.g. faiss prints INFO logs on import) or are rarely needed at startup.
# ---------------------------------------------------------------------------
# Maps a public attribute name to (relative module path, attribute name).
_LAZY_IMPORTS: dict[str, tuple[str, str]] = {
    "DataHubCatalogLoader": (".integrations.catalog.datahub_", "DataHubCatalogLoader"),
    "FAISSVectorStore": (".integrations.vectorstore.faiss_", "FAISSVectorStore"),
    "PGVectorStore": (".integrations.vectorstore.pgvector_", "PGVectorStore"),
}


def __getattr__(name: str):
    """Module-level fallback (PEP 562): resolve lazy attributes on first access.

    Raises:
        AttributeError: when *name* is not a registered lazy attribute.
    """
    try:
        module_path, attr_name = _LAZY_IMPORTS[name]
    except KeyError:
        # `from None` keeps the traceback identical to a plain raise
        # (no chained KeyError context).
        raise AttributeError(
            f"module {__name__!r} has no attribute {name!r}"
        ) from None

    import importlib

    resolved = getattr(importlib.import_module(module_path, package=__name__), attr_name)
    # Cache in module globals so subsequent accesses skip __getattr__
    globals()[name] = resolved
    return resolved
75 changes: 75 additions & 0 deletions src/lang2sql/components/retrieval/vector.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,81 @@ def add(self, chunks: list[IndexedChunk]) -> None:
self._vectorstore.upsert(ids, vectors)
self._registry.update({c["chunk_id"]: c for c in chunks})

# ── Persistence ──────────────────────────────────────────────────

def save(self, path: str) -> None:
    """Persist the vector index and the chunk registry under *path*.

    Only stores that expose a ``save()`` method (e.g. ``FAISSVectorStore``)
    are supported; stores without one (e.g. ``InMemoryVectorStore``) make
    this raise ``NotImplementedError``.

    Files written:
        ``{path}``          — vector index (written by the store itself)
        ``{path}.meta``     — chunk-id ordering (store internal)
        ``{path}.registry`` — chunk registry serialized as JSON

    Raises:
        NotImplementedError: when the underlying store has no ``save()``.
    """
    import json
    import pathlib

    persist = getattr(self._vectorstore, "save", None)
    if persist is None:
        raise NotImplementedError(
            f"{type(self._vectorstore).__name__} does not support save(). "
            "Use FAISSVectorStore for file-based persistence."
        )

    persist(path)
    registry_file = pathlib.Path(f"{path}.registry")
    registry_file.write_text(json.dumps(self._registry), encoding="utf-8")

@classmethod
def load(
    cls,
    path: str,
    *,
    vectorstore: VectorStorePort,
    embedding: EmbeddingPort,
    top_n: int = 5,
    score_threshold: float = 0.0,
    name: Optional[str] = None,
    hook: Optional[TraceHook] = None,
) -> "VectorRetriever":
    """Rebuild a VectorRetriever from a registry written by :meth:`save`.

    The vector index itself is restored by the caller and handed in as
    *vectorstore*; this keeps VectorRetriever independent of any concrete
    store implementation.

    Args:
        path: Path that was passed to :meth:`save` (registry file is
            expected at ``{path}.registry``).
        vectorstore: An already-loaded ``VectorStorePort`` implementation.
        embedding: ``EmbeddingPort`` implementation.
        top_n: Maximum number of schemas/contexts to return. Defaults to 5.
        score_threshold: Results scoring at or below this are dropped.
            Defaults to 0.0.
        name: Optional component name forwarded to the constructor.
        hook: Optional ``TraceHook`` forwarded to the constructor.

    Raises:
        FileNotFoundError: when ``{path}.registry`` does not exist.

    Example:
        store = FAISSVectorStore.load(path)
        retriever = VectorRetriever.load(path, vectorstore=store, embedding=emb)
    """
    import json
    import pathlib

    registry_file = pathlib.Path(f"{path}.registry")
    if not registry_file.exists():
        raise FileNotFoundError(f"Registry file not found: {registry_file}")

    restored = json.loads(registry_file.read_text(encoding="utf-8"))
    return cls(
        vectorstore=vectorstore,
        embedding=embedding,
        registry=restored,
        top_n=top_n,
        score_threshold=score_threshold,
        name=name,
        hook=hook,
    )

# ── Core retrieval ────────────────────────────────────────────────

def _run(self, query: str) -> RetrievalResult:
"""
Args:
Expand Down
Loading