Skip to content

Commit

Permalink
improve slides
Browse files Browse the repository at this point in the history
  • Loading branch information
vemonet committed Feb 18, 2025
1 parent d0dbb03 commit f6bb74f
Show file tree
Hide file tree
Showing 6 changed files with 62 additions and 36 deletions.
2 changes: 1 addition & 1 deletion packages/sparql-llm/src/sparql_llm/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Utilities to improve LLMs capabilities when working with SPARQL and RDF."""

__version__ = "0.0.5"
__version__ = "0.0.6"

from .utils import SparqlEndpointInfo
from .validate_sparql import validate_sparql_in_msg, validate_sparql_with_void
Expand Down
4 changes: 2 additions & 2 deletions tutorial/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@ version = "0.0.1"
requires-python = "==3.12.*"

dependencies = [
# "sparql-llm >=0.0.6",
"sparql-llm >=0.0.6",
# "sparql-llm @ git+https://github.com/sib-swiss/sparql-llm.git#subdirectory=packages/sparql-llm",
"sparql-llm @ file:///home/vemonet/dev/expasy/sparql-llm/packages/sparql-llm",
# "sparql-llm @ file:///home/vemonet/dev/expasy/sparql-llm/packages/sparql-llm",
"langchain >=0.3.19",
"langchain-community >=0.3.17",
"langchain-openai >=0.3.6",
Expand Down
11 changes: 10 additions & 1 deletion tutorial/slides/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -20,18 +20,27 @@
import Reveal from 'reveal.js';
import Markdown from 'reveal.js/plugin/markdown/markdown.esm.js';
import Highlight from 'reveal.js/plugin/highlight/highlight.js';
import ClipCode from '@edc4it/reveal.js-clipcode';
import 'reveal.js/plugin/highlight/zenburn.css';
import 'reveal.js/dist/reveal.css'
import 'reveal.js/dist/theme/beige.css'
// Available themes in node_modules/reveal.js/dist/theme
// beige, black, blood, league, moon, night, serif, simple...

let deck = new Reveal({
plugins: [Markdown, Highlight],
plugins: [Markdown, Highlight, ClipCode],
hash: true,
history: true,
hashOneBasedIndex: true,
// slideNumber: true,
clipcode: {
// https://www.npmjs.com/package/@edc4it/reveal.js-clipcode
style: {
copybg: 'silver',
scale: 0.8,
radius: 1,
},
},
});
deck.initialize();
</script>
Expand Down
22 changes: 22 additions & 0 deletions tutorial/slides/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions tutorial/slides/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
"serve": "vite preview"
},
"devDependencies": {
"@edc4it/reveal.js-clipcode": "^1.0.11",
"@types/node": "^22.13.4",
"reveal.js": "^5.1.0",
"vite": "^6.1.0"
Expand Down
58 changes: 26 additions & 32 deletions tutorial/slides/public/slides.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,6 @@ OPENAI_API_KEY=sk-proj-YYY
---

## Setup vector store

Deploy a **[Qdrant](https://qdrant.tech/documentation/)** vector store using docker:

```sh
docker run -d -p 6333:6333 -p 6334:6334 -v $(pwd)/data/qdrant:/qdrant/storage qdrant/qdrant
```

If you don't have docker you can try to [download and deploy the binary](https://github.com/qdrant/qdrant/releases/tag/v1.13.4) for your platform (might require installing additional dependencies though)

> Using in-memory vector store is also an option, but limited to 1 thread, with high risk of conflicts and no dashboard.
---

## Setup dependencies

Create a `pyproject.toml` file with this content:
Expand All @@ -47,7 +33,7 @@ name = "tutorial-sparql-agent"
version = "0.0.1"
requires-python = "==3.12.*"
dependencies = [
"sparql-llm >=0.0.5",
"sparql-llm >=0.0.6",
"langchain >=0.3.19",
"langchain-community >=0.3.17",
"langchain-openai >=0.3.6",
Expand Down Expand Up @@ -144,6 +130,20 @@ llm = load_chat_model("ollama/mistral")

---

## Setup vector store

Deploy a **[Qdrant](https://qdrant.tech/documentation/)** vector store using docker to store indexed documents:

```sh
docker run -d -p 6333:6333 -p 6334:6334 -v $(pwd)/data/qdrant:/qdrant/storage qdrant/qdrant
```

If you don't have docker you can try to [download and deploy the binary](https://github.com/qdrant/qdrant/releases/tag/v1.13.4) for your platform (this might require installing additional dependencies though)

> Using in-memory vector store is also an option, but limited to 1 thread, with high risk of conflicts and no dashboard.
---

## Index context

Create a new script that will be run to index data from SPARQL endpoints: `index.py`
Expand Down Expand Up @@ -188,6 +188,7 @@ def index_endpoints():
void_loader = SparqlVoidShapesLoader(
endpoint["endpoint_url"],
void_file=endpoint.get("void_file"),
examples_file=endpoint.get("examples_file"),
verbose=True,
)
docs += void_loader.load()
Expand Down Expand Up @@ -321,26 +322,18 @@ def retrieve_docs(question: str) -> str:
retrieved_docs = retriever.invoke(
question,
k=retrieved_docs_count,
filter=Filter(
must=[
FieldCondition(
key="metadata.doc_type",
match=MatchValue(value="SPARQL endpoints query examples"),
)
]
)
filter=Filter(must=[FieldCondition(
key="metadata.doc_type",
match=MatchValue(value="SPARQL endpoints query examples"),
)])
)
retrieved_docs += retriever.invoke(
question,
k=retrieved_docs_count,
filter=Filter(
must_not=[
FieldCondition(
key="metadata.doc_type",
match=MatchValue(value="SPARQL endpoints query examples"),
)
]
)
filter=Filter(must_not=[FieldCondition(
key="metadata.doc_type",
match=MatchValue(value="SPARQL endpoints query examples"),
)])
)
return f"<documents>\n{'\n'.join(_format_doc(doc) for doc in retrieved_docs)}\n</documents>"

Expand Down Expand Up @@ -371,7 +364,7 @@ async def on_message(msg: cl.Message):
await final_answer.send()
```

Deploy the UI with:
Deploy the UI on http://localhost:8000 with:

```sh
uv run chainlit run app.py
Expand Down Expand Up @@ -579,3 +572,4 @@ builder.add_edge("call_model", "validate_output")
builder.add_conditional_edges("validate_output", route_model_output)
```

> Try your agent again now

0 comments on commit f6bb74f

Please sign in to comment.