import os
import time
import logging

import requests
import aiohttp  # only used by the non-blocking sketch further down
from telethon import TelegramClient, events

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Read the Llama server host and the Telegram API credentials from
# environment variables. These (and the client) must exist before the
# handler below is defined, because @client.on runs at import time.
LLAMA_HOST = os.environ.get("LLAMA_HOST")
API_ID = int(os.environ.get("API_ID"))
API_HASH = os.environ.get("API_HASH")
BOT_TOKEN = os.environ.get("BOT_TOKEN")

# Create a Telegram client
client = TelegramClient('bot', API_ID, API_HASH).start(bot_token=BOT_TOKEN)

# Initialize global message count variable
message_count = 0

def get_current_model():
    """Ask the Llama server which model it is currently serving."""
    url = f"http://{LLAMA_HOST}:8000/v1/models"
    headers = {'accept': 'application/json'}
    response = requests.get(url, headers=headers)

    if response.status_code == 200:
        models_data = response.json()
        for model in models_data["data"]:
            if model["owned_by"] == "me":
                return model["id"]
        return None  # reached only if no locally owned model is listed
    else:
        logger.error("Failed to fetch current model. Status code: %s", response.status_code)
        return None
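
# A sketch of the response shape the parser above assumes. OpenAI-compatible
# servers such as llama-cpp-python typically report locally loaded models
# with "owned_by": "me"; the field values here are illustrative only:
#
#   {"object": "list",
#    "data": [{"id": "models/llama-2-13b.gguf",
#              "object": "model",
#              "owned_by": "me"}]}
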
@client.on(events.NewMessage(pattern='(?i)/beth'))
async def beth(event):
    global message_count

    try:
        # Send a one-off typing action (Telethon's own API; the original
        # called python-telegram-bot's ChatAction, which Telethon lacks)
        await client.action(event.chat_id, 'typing')

        # Check if message count has reached limit
        if message_count >= 1000:
            # Send "tired" message and return
            await event.respond("I'm hungover, I can't answer any more messages.")
            return

        # Get the message text (lowercase first so "/BETH" is stripped too)
        message_text = event.message.message.lower().replace('/beth', '', 1).strip()
        parts = message_text.split(' ')
        logger.debug(f"Parts: {parts}")
        if len(parts) >= 2:
            try:
                temperature = float(parts[0])
                if 0.0 <= temperature <= 2.0:
                    # Drop only the leading temperature token
                    message_text = message_text.split(' ', 1)[1].strip()
                else:
                    await event.respond("Too hot for me!")
                    return
            except ValueError:
                temperature = 0.7
        else:
            temperature = 0.7

        # Prepare the API request
        headers = {'accept': 'application/json', 'Content-Type': 'application/json'}
        data = {
            # "prompt": f"{message_text} Chess prodigy Beth Harmon overcomes addiction challenges male dominated world replies",
            "prompt": message_text,
            "temperature": temperature,
            "max_tokens": 50  # Adjust this value as needed
        }

        # Log the user prompt
        logger.info(f"Temp: {temperature}, prompt: {message_text}")

        # Record the time before the API request
        start_time = time.time()

        # Send the API request
        api_url = f'http://{LLAMA_HOST}:8000/v1/completions'
        response = requests.post(api_url, headers=headers, json=data)

        # Record the time after the API request
        end_time = time.time()
        message_count += 1

        # Calculate the time difference
        time_difference = end_time - start_time
        minutes, seconds = divmod(time_difference, 60)

        # Check if the request was successful
        if response.status_code == 200:
            # Parse the response and send the result to the chat
            api_result = response.json()
            result_text = api_result['choices'][0]['text'].strip()

            # Add a default message if the result_text is empty
            if not result_text:
                result_text = "I'm sorry, but it is still your turn to move."

            # Format the response time
            response_time = f"({int(minutes)}m{int(seconds)}s)"

            # Add the response time to the result text
            result_text_with_time = f"{result_text} {response_time}"

            # Log the API response
            logger.info(f"API response: {result_text_with_time}")

            await client.send_message(event.chat_id, result_text_with_time)
        else:
            # Send an error message if the request was not successful
            await client.send_message(event.chat_id, "Sorry, I need to go to the bathroom. Back soon!")

    except Exception as e:
        # Handle exceptions and send an error message
        logger.error(f"Error: {e}")
        await client.send_message(event.chat_id, "Oops. Broke the chess board.")

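# The handler above calls requests.post, which blocks the event loop for the
# whole generation. A minimal non-blocking sketch using aiohttp (imported at
# the top); query_llama is a hypothetical helper, not part of the original bot:
async def query_llama(prompt, temperature, max_tokens=50):
    """Non-blocking counterpart of the requests.post call in beth()."""
    api_url = f'http://{LLAMA_HOST}:8000/v1/completions'
    payload = {"prompt": prompt, "temperature": temperature, "max_tokens": max_tokens}
    async with aiohttp.ClientSession() as session:
        async with session.post(api_url, json=payload) as resp:
            if resp.status != 200:
                return None
            body = await resp.json()
            return body['choices'][0]['text'].strip()
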
current_model = get_current_model()
if current_model:
    logger.info("Starting Telegram bot with Llama model: %s", current_model)

    # Start the Telegram client
    client.run_until_disconnected()
else:
    logger.error("Could not determine the current Llama model; not starting.")
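
# Example chat session (assumes a live bot token and a Llama server
# reachable at LLAMA_HOST:8000; the prompts are illustrative):
#
#   /beth 1.2 what's the best reply to e4?   -> temperature 1.2
#   /beth what's the best reply to e4?       -> default temperature 0.7
#   /beth 9000 hello                         -> "Too hot for me!"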