55 Downloads Updated 6 months ago
Tutorial ini akan memandu Anda membuat chatbot Telegram menggunakan model AI ExaBot-Micro melalui API Ollama. ExaBot-Micro adalah model AI yang dioptimasi untuk customer service & marketing dengan ukuran hanya 1.3 GB.
# Install required packages
pip install python-telegram-bot requests aiohttp python-dotenv
# Alternative menggunakan requirements.txt
pip install -r requirements.txt
python-telegram-bot==20.7
requests==2.31.0
aiohttp==3.9.1
python-dotenv==1.0.0
# Telegram Bot Configuration
TELEGRAM_BOT_TOKEN=your_bot_token_here
TELEGRAM_CHAT_ID=your_chat_id_here
# Ollama Configuration
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL_NAME=exabot-micro
# Optional: Advanced Settings
MAX_MESSAGE_LENGTH=4000
TIMEOUT_SECONDS=30
# Test apakah Ollama berjalan
curl http://localhost:11434/api/version
# Test model ExaBot-Micro
curl -X POST http://localhost:11434/api/generate \
-H "Content-Type: application/json" \
-d '{
"model": "exabot-micro",
"prompt": "Halo, siapa kamu?",
"stream": false
}'
# Jika belum ada, buat model ExaBot-Micro
ollama create exabot-micro -f Modelfile-micro
# Test model
ollama run exabot-micro "Halo, perkenalkan dirimu"
/newbot

import os
import asyncio
import logging
from dotenv import load_dotenv
from telegram import Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
import requests
import json
# Load environment variables
load_dotenv()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
logger = logging.getLogger(__name__)
class ExaBotTelegram:
    """Telegram chatbot that answers messages via a local Ollama model.

    Configuration is read from environment variables (loaded from .env by
    load_dotenv() at module import):
      TELEGRAM_BOT_TOKEN  - required; token issued by @BotFather
      OLLAMA_BASE_URL     - Ollama endpoint, default http://localhost:11434
      OLLAMA_MODEL_NAME   - model to query, default 'exabot-micro'
      MAX_MESSAGE_LENGTH  - reply truncation limit, default 4000
      TIMEOUT_SECONDS     - HTTP timeout for Ollama calls, default 30
    """

    def __init__(self):
        self.bot_token = os.getenv('TELEGRAM_BOT_TOKEN')
        self.ollama_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434')
        self.model_name = os.getenv('OLLAMA_MODEL_NAME', 'exabot-micro')
        # Telegram hard-limits messages to 4096 chars; default stays below that.
        self.max_length = int(os.getenv('MAX_MESSAGE_LENGTH', '4000'))
        self.timeout = int(os.getenv('TIMEOUT_SECONDS', '30'))

    async def start_command(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Handler for the /start command: send the welcome message."""
        welcome_message = """
π€ **Selamat datang di ExaBot!**
Saya adalah asisten AI yang dikembangkan oleh Saputra Budi untuk membantu customer service & marketing.
**Commands yang tersedia:**
β’ /start - Menampilkan pesan ini
β’ /help - Bantuan penggunaan
β’ /info - Informasi tentang bot
β’ Atau langsung chat dengan saya!
**Contoh pertanyaan:**
β’ "Apa produk unggulan Anda?"
β’ "Bagaimana cara melakukan pemesanan?"
β’ "Berapa harga produk X?"
Mari mulai percakapan! π¬
"""
        await update.message.reply_text(welcome_message, parse_mode='Markdown')

    async def help_command(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Handler for the /help command: send usage instructions."""
        help_text = """
π **Bantuan ExaBot**
**Cara menggunakan:**
1. Ketik pertanyaan Anda dalam bahasa Indonesia
2. Tunggu respons dari ExaBot
3. Lanjutkan percakapan sesuai kebutuhan
**Tips:**
β’ Gunakan bahasa yang jelas dan spesifik
β’ Bot ini dioptimasi untuk customer service & marketing
β’ Respons akan singkat dan to-the-point
**Masalah teknis?**
Contact: @saputrabudi
"""
        await update.message.reply_text(help_text, parse_mode='Markdown')

    async def info_command(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Handler for the /info command: send bot/model metadata."""
        info_text = """
βΉοΈ **Informasi ExaBot**
**Model:** ExaBot-Micro v1.0
**Developer:** Saputra Budi
**Engine:** Ollama + Llama 3.2:1b
**Ukuran Model:** 1.3 GB
**Optimized untuk:** Customer Service & Marketing Indonesia
**Kemampuan:**
β Jawaban singkat dan akurat
β Bahasa Indonesia natural
β Response time cepat
β Context-aware conversation
**Powered by Ollama API**
"""
        await update.message.reply_text(info_text, parse_mode='Markdown')

    def query_ollama(self, prompt: str) -> str:
        """Send a prompt to the Ollama /api/generate endpoint.

        Blocking call (uses `requests`); callers on the event loop should
        run it in a thread. Returns the model text, or a user-facing
        (Indonesian) error string on failure — it never raises.
        """
        try:
            payload = {
                "model": self.model_name,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "temperature": 0.5,
                    "num_predict": 500,   # cap generated tokens
                    "num_ctx": 1024       # small context window keeps latency low
                }
            }
            response = requests.post(
                f"{self.ollama_url}/api/generate",
                json=payload,
                timeout=self.timeout
            )
            if response.status_code == 200:
                result = response.json()
                return result.get('response', 'Maaf, tidak ada respons dari model.')
            else:
                logger.error(f"Ollama API error: {response.status_code}")
                return "β Maaf, terjadi kesalahan pada sistem AI."
        except requests.exceptions.Timeout:
            logger.error("Ollama API timeout")
            return "β±οΈ Timeout - Mohon coba lagi dalam beberapa saat."
        except requests.exceptions.ConnectionError:
            logger.error("Cannot connect to Ollama API")
            return "π Tidak dapat terhubung ke AI service. Admin telah diberitahu."
        except Exception as e:
            logger.error(f"Unexpected error: {str(e)}")
            return "β Terjadi kesalahan tidak terduga. Mohon coba lagi."

    async def handle_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Handler for all plain-text messages: relay to Ollama and reply."""
        user_message = update.message.text
        user_name = update.message.from_user.first_name or "User"
        logger.info(f"Message from {user_name}: {user_message}")
        # Show a typing indicator while the model generates.
        await context.bot.send_chat_action(
            chat_id=update.effective_chat.id,
            action='typing'
        )
        # query_ollama() blocks on requests.post; run it in a worker thread
        # so the event loop keeps serving other updates meanwhile (the
        # original called it inline and froze the bot for up to `timeout`s).
        ai_response = await asyncio.to_thread(self.query_ollama, user_message)
        # Truncate to stay within Telegram's message size limit.
        if len(ai_response) > self.max_length:
            ai_response = ai_response[:self.max_length] + "..."
        await update.message.reply_text(ai_response)
        logger.info(f"Response sent to {user_name}: {ai_response[:100]}...")

    async def error_handler(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Log errors raised while processing updates."""
        logger.warning(f'Update "{update}" caused error "{context.error}"')

    def run(self):
        """Build the Application, register handlers, and start polling.

        Blocks until interrupted. Returns early (after logging) when
        TELEGRAM_BOT_TOKEN is not configured.
        """
        if not self.bot_token:
            logger.error("TELEGRAM_BOT_TOKEN not found in environment variables")
            return
        application = Application.builder().token(self.bot_token).build()
        # Commands first; the text handler excludes commands so they don't overlap.
        application.add_handler(CommandHandler("start", self.start_command))
        application.add_handler(CommandHandler("help", self.help_command))
        application.add_handler(CommandHandler("info", self.info_command))
        application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, self.handle_message))
        application.add_error_handler(self.error_handler)
        logger.info("Starting ExaBot Telegram Bot...")
        print("π€ ExaBot Telegram Bot is running...")
        print("Press Ctrl+C to stop")
        application.run_polling(allowed_updates=Update.ALL_TYPES)
if __name__ == '__main__':
    # Script entry point: construct the bot and start long-polling.
    ExaBotTelegram().run()
# Buat file .env
nano .env
# Isi dengan:
TELEGRAM_BOT_TOKEN=1234567890:ABCDEFGHIJKLMNOPQRSTUVWXYZ123456789
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL_NAME=exabot-micro
MAX_MESSAGE_LENGTH=4000
TIMEOUT_SECONDS=30
# Pastikan Ollama running
ollama serve
# Di terminal lain, jalankan bot
python telegram_bot.py
Bot username: @exabot_cs_bot — available commands: /start, /help, /info

import sqlite3
from datetime import datetime, timedelta
class ExaBotWithMemory(ExaBotTelegram):
    """ExaBotTelegram extended with per-user conversation memory in SQLite.

    Recent exchanges (last hour, newest 3) are prepended to each prompt so
    the model can answer follow-up questions in context.
    """

    def __init__(self):
        super().__init__()
        self.init_database()

    def init_database(self):
        """Open (creating if needed) the conversation-history database."""
        # check_same_thread=False so the connection survives being touched
        # from whatever thread the framework runs handlers on.
        self.conn = sqlite3.connect('exabot_conversations.db', check_same_thread=False)
        self.conn.execute('''
            CREATE TABLE IF NOT EXISTS conversations (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                user_id INTEGER,
                user_name TEXT,
                message TEXT,
                response TEXT,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        self.conn.commit()

    def get_conversation_context(self, user_id: int, limit: int = 3) -> str:
        """Return the user's recent exchanges (last hour, newest `limit`)
        formatted oldest-first as "User: ...\\nExaBot: ...\\n\\n" blocks.
        Empty string when there is no recent history."""
        cursor = self.conn.execute('''
            SELECT message, response FROM conversations
            WHERE user_id = ? AND timestamp > datetime('now', '-1 hour')
            ORDER BY timestamp DESC LIMIT ?
        ''', (user_id, limit))
        context = ""
        # Rows come back newest-first; reverse to chronological order.
        for message, response in reversed(cursor.fetchall()):
            context += f"User: {message}\nExaBot: {response}\n\n"
        return context

    def save_conversation(self, user_id: int, user_name: str, message: str, response: str):
        """Persist one exchange (parameterized insert; timestamp defaults to now)."""
        self.conn.execute('''
            INSERT INTO conversations (user_id, user_name, message, response)
            VALUES (?, ?, ?, ?)
        ''', (user_id, user_name, message, response))
        self.conn.commit()

    def query_ollama_with_context(self, prompt: str, user_id: int) -> str:
        """Query Ollama with the user's recent history prepended.

        Falls back to the plain query on any error, or when the user has
        no recent history (the original sent the context wrapper even when
        it was empty, which only misleads the model).
        """
        try:
            context = self.get_conversation_context(user_id)
            if not context:
                return self.query_ollama(prompt)
            enhanced_prompt = f"""Context percakapan sebelumnya:
{context}
Pertanyaan terbaru: {prompt}
Jawab dengan mempertimbangkan context percakapan sebelumnya jika relevan."""
            payload = {
                "model": self.model_name,
                "prompt": enhanced_prompt,
                "stream": False,
                "options": {
                    "temperature": 0.5,
                    "num_predict": 500,
                    "num_ctx": 2048  # larger window to fit the history
                }
            }
            response = requests.post(
                f"{self.ollama_url}/api/generate",
                json=payload,
                timeout=self.timeout
            )
            if response.status_code == 200:
                result = response.json()
                return result.get('response', 'Maaf, tidak ada respons dari model.')
            else:
                logger.error(f"Ollama API error: {response.status_code}")
                return "β Maaf, terjadi kesalahan pada sistem AI."
        except Exception as e:
            logger.error(f"Error in context query: {str(e)}")
            return self.query_ollama(prompt)  # Fallback to basic query

    async def handle_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Text-message handler that uses and records conversation history."""
        user_message = update.message.text
        user_id = update.message.from_user.id
        user_name = update.message.from_user.first_name or "User"
        logger.info(f"Message from {user_name} (ID: {user_id}): {user_message}")
        await context.bot.send_chat_action(
            chat_id=update.effective_chat.id,
            action='typing'
        )
        # Get AI response with context
        ai_response = self.query_ollama_with_context(user_message, user_id)
        # Record the exchange before truncation so history keeps the full reply.
        self.save_conversation(user_id, user_name, user_message, ai_response)
        if len(ai_response) > self.max_length:
            ai_response = ai_response[:self.max_length] + "..."
        await update.message.reply_text(ai_response)
        logger.info(f"Response sent to {user_name}: {ai_response[:100]}...")
from collections import defaultdict
import time
class ExaBotWithRateLimit(ExaBotTelegram):
    """ExaBotTelegram with a per-user sliding-window rate limit."""

    def __init__(self):
        super().__init__()
        self.user_requests = defaultdict(list)  # user_id -> request timestamps
        self.rate_limit = 10  # messages per minute
        self.rate_window = 60  # seconds

    def is_rate_limited(self, user_id: int) -> bool:
        """Return True when the user already has `rate_limit` requests inside
        the window; otherwise record this request and return False."""
        current = time.time()
        timestamps = self.user_requests[user_id]
        # Prune entries that have aged out of the sliding window (in place,
        # so the defaultdict keeps referencing the same list).
        timestamps[:] = [stamp for stamp in timestamps if current - stamp < self.rate_window]
        if len(timestamps) >= self.rate_limit:
            return True
        timestamps.append(current)
        return False

    async def handle_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
        """Reject over-quota senders, otherwise defer to the parent handler."""
        sender = update.message.from_user.id
        if not self.is_rate_limited(sender):
            await super().handle_message(update, context)
            return
        await update.message.reply_text(
            "β±οΈ Anda terlalu cepat mengirim pesan. Mohon tunggu sebentar."
        )
# Slim Python base keeps the image small; 3.11 matches the bot's code.
FROM python:3.11-slim
WORKDIR /app
# Install dependencies first so this layer is cached across code changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application
COPY . .
# Create directory for database (mounted as a volume in docker-compose)
RUN mkdir -p /app/data
# Run the bot
CMD ["python", "telegram_bot.py"]
version: '3.8'

services:
  # The Telegram bot, built from the local Dockerfile.
  exabot-telegram:
    build: .
    environment:
      - TELEGRAM_BOT_TOKEN=${TELEGRAM_BOT_TOKEN}
      # Reach Ollama via the compose service name, not localhost.
      - OLLAMA_BASE_URL=http://ollama:11434
      - OLLAMA_MODEL_NAME=exabot-micro
    volumes:
      # Persist the SQLite conversation database outside the container.
      - ./data:/app/data
    depends_on:
      - ollama
    restart: unless-stopped

  # Local Ollama server hosting the exabot-micro model.
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      # Bind all interfaces so the bot container can connect.
      - OLLAMA_HOST=0.0.0.0
    restart: unless-stopped

volumes:
  ollama_data:
# Build dan run
docker-compose up -d
# Setup model di container ollama
docker-compose exec ollama ollama pull llama3.2:1b
docker-compose exec ollama ollama create exabot-micro -f /path/to/Modelfile-micro
# Check logs
docker-compose logs -f exabot-telegram
# Check Ollama service
curl http://localhost:11434/api/version
# Check model availability
ollama list | grep exabot-micro
# Check bot logs
tail -f telegram_bot.log
# Increase timeout di .env
TIMEOUT_SECONDS=60
# Atau di code:
payload["options"]["timeout"] = 60
# Limit Ollama memory
OLLAMA_MAX_LOADED_MODELS=1 ollama serve
# Monitor usage
docker stats
# Add delay between responses
import asyncio
await asyncio.sleep(1)
import sqlite3
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
def generate_usage_report():
    """Generate a 7-day usage report for the bot.

    Saves a line chart of daily message counts to usage_report.png, shows
    it, and prints the top 10 users to stdout. Reads from
    exabot_conversations.db (created by ExaBotWithMemory).
    """
    conn = sqlite3.connect('exabot_conversations.db')
    # Daily usage stats
    cursor = conn.execute('''
        SELECT DATE(timestamp) as date, COUNT(*) as messages
        FROM conversations
        WHERE timestamp > datetime('now', '-7 days')
        GROUP BY DATE(timestamp)
        ORDER BY date
    ''')
    # Fetch exactly once: fetchall() drains the cursor, so the original
    # "zip(*cursor.fetchall()) if cursor.fetchall() else ..." pattern first
    # consumed the rows in the condition and then unpacked an empty zip(),
    # raising ValueError whenever any data existed.
    rows = cursor.fetchall()
    dates, counts = zip(*rows) if rows else ([], [])
    # Plot usage
    plt.figure(figsize=(10, 6))
    plt.plot(dates, counts, marker='o')
    plt.title('ExaBot Daily Usage (Last 7 Days)')
    plt.xlabel('Date')
    plt.ylabel('Messages')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('usage_report.png')
    plt.show()
    # Top users by message count over the same window.
    cursor = conn.execute('''
        SELECT user_name, COUNT(*) as messages
        FROM conversations
        WHERE timestamp > datetime('now', '-7 days')
        GROUP BY user_id, user_name
        ORDER BY messages DESC
        LIMIT 10
    ''')
    print("\nπ Top Users (Last 7 Days):")
    for user, count in cursor.fetchall():
        print(f"β’ {user}: {count} messages")
    conn.close()


if __name__ == '__main__':
    generate_usage_report()
#!/bin/bash
# Redeploy the ExaBot Telegram bot stack with docker-compose.
echo "π Deploying ExaBot Telegram Bot..."
# Stop existing containers
docker-compose down
# Pull latest changes
git pull origin main
# Rebuild and start
docker-compose up -d --build
# Wait for services to start
sleep 10
# Health check: abort the deploy if Ollama is not answering.
curl -f http://localhost:11434/api/version || exit 1
echo "β ExaBot deployed successfully!"
echo "π Check status: docker-compose ps"
echo "π View logs: docker-compose logs -f exabot-telegram"
User: /start
Bot: π€ Selamat datang di ExaBot! Saya adalah asisten AI yang dikembangkan oleh Saputra Budi untuk membantu customer service & marketing...
User: Halo, siapa kamu?
Bot: Halo! Saya ExaBot, Asisten AI yang dikembangkan oleh Saputra Budi untuk customer service & marketing. Saya siap membantu Anda dengan pertanyaan seputar produk, layanan, atau informasi lainnya. Ada yang bisa saya bantu hari ini?
User: Apa produk unggulan kalian?
Bot: Sebagai asisten AI, saya dapat membantu menjelaskan berbagai produk unggulan yang tersedia. Namun untuk informasi spesifik tentang produk unggulan perusahaan tertentu, mohon berikan detail lebih lanjut tentang bisnis atau industri yang Anda maksud, sehingga saya dapat memberikan rekomendasi yang lebih tepat.
User: Bagaimana cara melakukan pemesanan?
Bot: Berikut cara umum melakukan pemesanan:
1. π **Kontak Langsung**: Hubungi customer service
2. π **Website**: Akses portal pemesanan online
3. π± **Aplikasi**: Gunakan mobile app jika tersedia
4. π¬ **Chat**: Melalui live chat atau WhatsApp
Untuk pemesanan spesifik, mohon informasikan jenis produk/layanan yang ingin dipesan agar saya dapat memberikan panduan yang lebih detail.