Update Ollama integration for Matrix bot

- Added Ollama client integration to handle chat completions.
- Modified the `chatbot` function to use the Ollama client for chat interactions.
- Replaced the `dalle` implementation with an error message, since image generation is not supported by Ollama.
- Renamed `reset_chatgpt` to `reset_chat`; it still clears the per-user chat history.
- Added new settings for Ollama base URL and model in `settings.py`.
- Updated documentation in `README.md` to reflect the changes.
Roger Gonzalez, 2025-03-13 16:10:25 -03:00
parent 89d76525ec, commit 451ab831b8
Signed by: rogs (GPG Key ID: C7ECE9C6C36EC2E6)
4 changed files with 108 additions and 44 deletions
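The new client wraps Ollama's `/api/chat` endpoint. Stripped of the class, the round trip looks roughly like this — a sketch assuming a local Ollama server on the default port, mirroring the request that `ollama_client.py` below builds:

```python
import requests

# Minimal sketch of the call ollama_client.py makes,
# assuming Ollama is listening on http://localhost:11434.
payload = {
    "model": "llama3.2:latest",
    "messages": [{"role": "user", "content": "Hello!"}],
    "stream": False,  # ask for one JSON object instead of a streamed response
}
resp = requests.post("http://localhost:11434/api/chat", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["message"]["content"])  # Ollama nests the reply under "message"
```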

README.md

@@ -27,9 +27,8 @@ A Matrix bot that helps manage TODOs, track expenses, monitor bank accounts, sav
   - Auto-saves any shared URL to an org-mode file
 - **AI Integration**:
-  - Chat with GPT-4 (continues conversation)
+  - Chat with Llama 3.2 via Ollama (continues conversation)
   - `!reset` - Reset chat history
-  - `!dalle` - Generate images using DALL-E

 ## Setup
@@ -38,7 +37,15 @@ A Matrix bot that helps manage TODOs, track expenses, monitor bank accounts, sav
    poetry install
    ```
-2. Create a `.env` file with the following variables:
+2. Install and run Ollama:
+   - Follow the instructions at [Ollama's website](https://ollama.ai/) to install Ollama
+   - Pull the Llama 3.2 model:
+     ```bash
+     ollama pull llama3.2:latest
+     ```
+   - Start the Ollama server (it typically runs on port 11434)
+3. Create a `.env` file with the following variables:
    ```
    # Matrix Configuration
    MATRIX_URL=
@@ -54,11 +61,12 @@ ORG_CAPTURE_FILENAME=
    ORG_PLAN_FILENAME=
    ORG_LINKS_FILENAME=

-   # API Keys
-   OPEN_AI_API_KEY=
+   # Ollama Configuration
+   OLLAMA_BASE_URL=http://localhost:11434
+   OLLAMA_MODEL=llama3.2:latest
    ```
-3. Run the bot:
+4. Run the bot:
    ```bash
    python bot.py
    ```
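Before starting the bot, it can be worth confirming that the Ollama server is reachable and the model has been pulled. A minimal check, assuming the default base URL and Ollama's `GET /api/tags` endpoint for listing local models:

```python
import requests

# Sketch: list locally available models and check for the one the bot expects.
resp = requests.get("http://localhost:11434/api/tags", timeout=5)
resp.raise_for_status()
models = [m["name"] for m in resp.json().get("models", [])]
print("llama3.2:latest" in models)  # should print True after `ollama pull`
```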
@@ -68,12 +76,13 @@ python bot.py
 - Python 3.9+
 - Poetry for dependency management
 - Matrix server access
-- Optional: Bank accounts with BROU and Itau for banking features
-- Optional: OpenAI API key for AI features
+- Optional: Bank accounts with Bank of America for banking features
+- Ollama installed and running with the llama3.2:latest model

 ## Project Structure

 - `bot.py`: Main bot implementation with command handlers
+- `ollama_client.py`: Ollama API client for AI features
 - `bofa.py`: Bank of America data processing
 - `org.py`: Org-mode file management
 - `settings.py`: Environment configuration
@@ -83,6 +92,5 @@ python bot.py
 Key dependencies include:
 - `simplematrixbotlib`: Matrix bot framework
 - `orgparse`: Org-mode file parsing
-- `openai`: GPT-4 and DALL-E integration
+- `requests`: API interactions with Ollama
 - `pyexcel-ods3`: Spreadsheet processing
-- `requests`: API interactions

bot.py

@@ -1,13 +1,11 @@
 """A Matrix bot that manages TODOs, expenses, and AI interactions."""

-import os
-import openai
 import simplematrixbotlib as botlib
 import validators
-import wget

 from bofa import BofaData
+from ollama_client import OllamaClient
 from org import OrgData
 from settings import (
     MATRIX_PASSWORD,
@@ -15,10 +13,12 @@ from settings import (
     MATRIX_USER,
     MATRIX_USERNAME,
     MATRIX_USERNAMES,
-    OPEN_AI_API_KEY,
+    OLLAMA_BASE_URL,  # New setting for Ollama
+    OLLAMA_MODEL,  # New setting for Ollama model
 )

-openai.api_key = OPEN_AI_API_KEY
+# Initialize the Ollama client
+ollama = OllamaClient(base_url=OLLAMA_BASE_URL, model=OLLAMA_MODEL)

 creds = botlib.Creds(MATRIX_URL, MATRIX_USER, MATRIX_PASSWORD)
 bot = botlib.Bot(creds)
@@ -144,12 +144,12 @@ async def save_link(room, message):
 @bot.listener.on_message_event
-async def chatgpt(room, message):
-    """Start a conversation with ChatGPT.
+async def chatbot(room, message):
+    """Start a conversation with the Ollama model.

     Usage:
-        user: !chatgpt Hello!
-        bot: [prints chatgpt response]
+        user: Any message
+        bot: [prints Ollama model response]
     """
     match = botlib.MessageMatch(room, message, bot, PREFIX)
     message_content = message.body
@@ -160,7 +160,7 @@ async def chatgpt(room, message):
     personal_conversation = CONVERSATION[user]
     room_id = room.room_id

-    print(f"Room: {room_id}, User: {user}, Message: chatgpt")
+    print(f"Room: {room_id}, User: {user}, Message: chat with Ollama")

     def format_message(message):
         return {"role": "user", "content": message}
@@ -168,9 +168,10 @@ async def chatgpt(room, message):
     personal_conversation.append(format_message(message_content))

     try:
-        completion = openai.ChatCompletion.create(model="gpt-4o", messages=personal_conversation)
-        response = completion.choices[0].message.content
-        personal_conversation.append(completion.choices[0].message)
+        # Using Ollama instead of OpenAI
+        completion = ollama.chat_completion(personal_conversation)
+        response = completion["choices"][0]["message"]["content"]
+        personal_conversation.append(completion["choices"][0]["message"])
     except Exception as e:
         print(f"Error: {e}")
         response = "There was a problem with your prompt"
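Since each assistant reply is appended back onto `personal_conversation`, the next call resends the whole history; that is what lets the stateless `/api/chat` endpoint "continue" the conversation. An illustrative sketch of the list after one exchange (contents hypothetical):

```python
# Hypothetical history after one round trip; the full list is
# resent to Ollama on every new user message.
personal_conversation = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]
```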
@@ -180,8 +181,8 @@ async def chatgpt(room, message):
 @bot.listener.on_message_event
-async def reset_chatgpt(room, message):
-    """Reset the ChatGPT conversation history.
+async def reset_chat(room, message):
+    """Reset the chat conversation history.

     Usage:
         user: !reset
@@ -200,12 +201,12 @@ async def reset_chatgpt(room, message):
 @bot.listener.on_message_event
-async def dall_e(room, message):
-    """Generate an image using DALL-E.
+async def dalle(room, message):
+    """Generate an image (feature not available with Ollama).

     Usage:
         user: !dalle A sunny caribbean beach
-        bot: returns an image
+        bot: returns an error message
     """
     match = botlib.MessageMatch(room, message, bot, PREFIX)
     if match.is_not_from_this_bot() and match.prefix() and match.command("dalle"):
@@ -213,22 +214,12 @@ async def dall_e(room, message):
         if user in MATRIX_USERNAMES:
             room_id = room.room_id
-            message = " ".join(message.body.split(" ")[1:]).strip()

             print(f"Room: {room_id}, User: {user}, Message: dalle")
-            await bot.api.send_text_message(room_id, "Generating image...")
-            try:
-                image = openai.Image.create(prompt=message)
-                image_url = image["data"][0]["url"]
-                image_filename = wget.download(image_url)
-                await bot.api.send_image_message(room_id, image_filename)
-                os.remove(image_filename)
-                return None
-            except Exception as e:
-                print(f"Error sending image: {e}")
-                return await bot.api.send_text_message(room_id, f"Error sending image: {e}")
+            return await bot.api.send_text_message(
+                room_id,
+                "Image generation is not available with the Ollama integration. "
+                "Please consider using a dedicated image generation service.",
+            )
     return None

ollama_client.py (new file)

@@ -0,0 +1,64 @@
"""Ollama integration for Matrix bot."""

import json
from typing import Any, Dict, List

import requests

HTTP_OK = 200


class OllamaClient:
    """Client for interacting with Ollama API."""

    def __init__(self, base_url: str = "http://localhost:11434", model: str = "llama3.2:latest"):
        """Initialize the Ollama client.

        Args:
            base_url: Base URL for the Ollama API
            model: Model name to use for completions
        """
        self.base_url = base_url
        self.model = model
        self.api_url = f"{base_url}/api"

    def chat_completion(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Create a chat completion using Ollama.

        Args:
            messages: List of message dictionaries with 'role' and 'content' keys

        Returns:
            Dict containing the model's response
        """
        # Convert OpenAI-style messages to Ollama format
        # Ollama expects a simpler format with just the messages array
        payload = {
            "model": self.model,
            "messages": messages,
            "stream": False,
        }

        response = requests.post(
            f"{self.api_url}/chat",
            headers={"Content-Type": "application/json"},
            data=json.dumps(payload),
        )

        if response.status_code != HTTP_OK:
            raise Exception(f"Error from Ollama API: {response.text}")

        result = response.json()

        # Convert Ollama response to a format similar to OpenAI for easier integration
        return {
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": result["message"]["content"],
                    },
                },
            ],
            "model": self.model,
        }
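A quick usage sketch for the client above, assuming a local Ollama server with `llama3.2:latest` already pulled:

```python
from ollama_client import OllamaClient

client = OllamaClient()  # defaults match settings.py: localhost:11434, llama3.2:latest
completion = client.chat_completion([{"role": "user", "content": "Hello!"}])

# The client reshapes Ollama's reply into an OpenAI-style dict,
# so callers keep indexing choices[0]["message"].
print(completion["choices"][0]["message"]["content"])
```

Raising a plain `Exception` on non-200 responses keeps the bot's existing `try/except` handling in `chatbot` working unchanged.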

settings.py

@@ -19,4 +19,5 @@ ORG_CAPTURE_FILENAME = f"{ORG_LOCATION}/{os.environ.get('ORG_CAPTURE_FILENAME')}
 ORG_PLAN_FILENAME = f"{ORG_LOCATION}/{os.environ.get('ORG_PLAN_FILENAME')}"
 ORG_LINKS_FILENAME = f"{ORG_LOCATION}/{os.environ.get('ORG_LINKS_FILENAME')}"
-OPEN_AI_API_KEY = os.environ.get("OPEN_AI_API_KEY")
+OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
+OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "llama3.2:latest")