diff --git a/examples/chat-stream.py b/examples/chat-stream.py
index 4e7625ef..684e3ba3 100644
--- a/examples/chat-stream.py
+++ b/examples/chat-stream.py
@@ -1,3 +1,15 @@
+"""Streaming chat response.
+
+Prints tokens as they are generated instead of waiting for the full
+response, providing a real-time typing effect.
+
+Prerequisites:
+ ollama pull gemma3
+
+Usage:
+ python chat-stream.py
+"""
+
 from ollama import chat
 
 messages = [
diff --git a/examples/chat-with-history.py b/examples/chat-with-history.py
index 72754718..94f60113 100644
--- a/examples/chat-with-history.py
+++ b/examples/chat-with-history.py
@@ -1,3 +1,15 @@
+"""Multi-turn chat with conversation history.
+
+Sends multiple messages in sequence, maintaining context across turns
+so the model can reference earlier parts of the conversation.
+
+Prerequisites:
+ ollama pull gemma3
+
+Usage:
+ python chat-with-history.py
+"""
+
 from ollama import chat
 
 messages = [
diff --git a/examples/chat.py b/examples/chat.py
index fd498430..e3ce4ae2 100644
--- a/examples/chat.py
+++ b/examples/chat.py
@@ -1,3 +1,14 @@
+"""Basic chat completion.
+
+Sends a single user message and prints the model's response.
+
+Prerequisites:
+ ollama pull gemma3
+
+Usage:
+ python chat.py
+"""
+
 from ollama import chat
 
 messages = [
diff --git a/examples/embed.py b/examples/embed.py
index 5af145ea..dbc5575e 100644
--- a/examples/embed.py
+++ b/examples/embed.py
@@ -1,3 +1,15 @@
+"""Generate text embeddings.
+
+Produces a vector embedding for the given input text, useful for
+semantic search, clustering, and similarity comparisons.
+
+Prerequisites:
+ ollama pull llama3.2
+
+Usage:
+ python embed.py
+"""
+
 from ollama import embed
 
 response = embed(model='llama3.2', input='Hello, world!')
diff --git a/examples/generate.py b/examples/generate.py
index 69483e58..8e069c1d 100644
--- a/examples/generate.py
+++ b/examples/generate.py
@@ -1,3 +1,14 @@
+"""Basic text generation.
+
+Generates a response from a prompt without conversation history.
+
+Prerequisites:
+ ollama pull gemma3
+
+Usage:
+ python generate.py
+"""
+
 from ollama import generate
 
 response = generate('gemma3', 'Why is the sky blue?')
diff --git a/examples/multimodal-chat.py b/examples/multimodal-chat.py
index db9209b1..1fdc2d28 100644
--- a/examples/multimodal-chat.py
+++ b/examples/multimodal-chat.py
@@ -1,3 +1,15 @@
+"""Multimodal chat with image input.
+
+Sends an image alongside a text prompt, allowing the model to describe
+or answer questions about the image content.
+
+Prerequisites:
+ ollama pull gemma3
+
+Usage:
+ python multimodal-chat.py
+"""
+
 from ollama import chat
 
 # from pathlib import Path
diff --git a/examples/structured-outputs.py b/examples/structured-outputs.py
index 4c60d5f4..5e318739 100644
--- a/examples/structured-outputs.py
+++ b/examples/structured-outputs.py
@@ -1,3 +1,16 @@
+"""Structured JSON output with Pydantic validation.
+
+Forces the model to return JSON conforming to a Pydantic schema,
+then validates the response automatically.
+
+Prerequisites:
+ pip install pydantic
+ ollama pull llama3.1:8b
+
+Usage:
+ python structured-outputs.py
+"""
+
 from pydantic import BaseModel
 
 from ollama import chat
diff --git a/examples/tools.py b/examples/tools.py
index 86019fd3..bb010ccb 100644
--- a/examples/tools.py
+++ b/examples/tools.py
@@ -1,3 +1,15 @@
+"""Tool calling (function calling) with Ollama.
+
+Demonstrates how to define Python functions as tools that the model can
+invoke, dispatch the calls, and feed results back into the conversation.
+
+Prerequisites:
+ ollama pull llama3.1
+
+Usage:
+ python tools.py
+"""
+
 from ollama import ChatResponse, chat