forked from Xueheng-Li/SynologyChatbotGPT
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathbasicBot.py
134 lines (94 loc) · 4.55 KB
/
basicBot.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import json, time
import requests
import openai
from flask import Flask, request, jsonify
from settings import *
# Set up OpenAI API key (openai_api_key is provided by `from settings import *`)
openai.api_key = openai_api_key
# In-memory conversation history storage, keyed by Synology Chat user_id.
# Each value is a dict: {"username": str, "messages": [{"role", "content"}, ...],
# "last_timestamp": int} — see generate_gpt_response(), which creates/updates it.
# NOTE(review): state is lost on restart and unbounded across users.
conversation_history = {}
def send_back_message(user_id, response_text):
    """Push *response_text* to a Synology Chat user via the incoming webhook.

    The Synology webhook expects a form-style body of the shape
    ``payload=<json>``; on HTTP/network failure a (message, 500) tuple is
    returned so a Flask caller can surface the error.
    """
    body = 'payload=' + json.dumps({
        'text': response_text,
        "user_ids": [int(user_id)]
    })
    try:
        resp = requests.post(INCOMING_WEBHOOK_URL, body)
        # Surface HTTP-level errors (4xx/5xx) as exceptions.
        resp.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Error sending message to Synology Chat: {e}")
        return "Error sending message to Synology Chat", 500
def process_synology_chat_message(event):
    """Handle one outgoing-webhook event from Synology Chat.

    Verifies the shared token, sends an instant "..." placeholder, then
    generates and delivers the real GPT reply. Returns a short status string.
    """
    if event.get('token') != OUTGOING_WEBHOOK_TOKEN:
        return "Invalid token"

    user_id = event.get('user_id')
    username = event.get('username')
    text = event.get('text')

    # Instant placeholder so the user sees the bot reacted immediately.
    send_back_message(user_id, "...")

    # Now compute and deliver the proper model-generated reply.
    reply = generate_gpt_response(user_id, username, text)
    print(reply)
    send_back_message(user_id, reply)
    return "Message processed"
def generate_gpt_response(user_id, username, message, max_conversation_length=max_conversation_length, refresh_keywords=None, max_time_gap=max_time_gap):
    """Generate a GPT reply for *message*, maintaining per-user history.

    Parameters:
        user_id: Synology Chat user id — key into `conversation_history`.
        username: display name stored alongside the history.
        message: the user's raw message text.
        max_conversation_length: max history entries kept per user.
        refresh_keywords: messages that reset the conversation; defaults to a
            built-in list (English + Chinese) when None.
        max_time_gap: idle minutes after which the conversation is reset.

    Returns the assistant's reply text, or an error description when the
    completion did not finish with reason "stop".
    """
    # Check for refresh keywords to start a new conversation.
    if refresh_keywords is None:
        refresh_keywords = ["new", "refresh", "00", "restart", "刷新", "新话题", "退下", "结束", "over"]
    if message.strip().lower() in refresh_keywords:
        conversation_history.pop(user_id, None)
        return "----------------------------"

    current_timestamp = int(time.time())

    # Drop the conversation if it has been idle longer than max_time_gap minutes.
    if (user_id in conversation_history and
            current_timestamp - conversation_history[user_id]["last_timestamp"] >= max_time_gap * 60):
        del conversation_history[user_id]

    # Create or refresh this user's history record.
    if user_id not in conversation_history:
        conversation_history[user_id] = {"username": username, "messages": [], "last_timestamp": current_timestamp}
    else:
        conversation_history[user_id]["last_timestamp"] = current_timestamp

    conversation_history[user_id]["messages"].append({"role": "user", "content": message})
    # BUG FIX: truncate AFTER appending — the original truncated first, so the
    # stored history could grow to max_conversation_length + 1 entries.
    if len(conversation_history[user_id]["messages"]) > max_conversation_length:
        conversation_history[user_id]["messages"] = conversation_history[user_id]["messages"][-max_conversation_length:]

    # BUG FIX: the original had `system_prompt = system_prompt`, which makes the
    # name a local variable and raises UnboundLocalError on every call. Read
    # the module-level system_prompt (from settings) directly instead.
    messages = [{"role": "system", "content": system_prompt}]
    for entry in conversation_history[user_id]["messages"]:
        messages.append({"role": entry['role'], "content": entry['content']})
    print(f"messages: {messages}")

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=temperature,
    )

    choice = response['choices'][0]
    response_role = choice['message']['role']
    if choice['finish_reason'] == "stop":
        response_text = choice['message']['content']
    else:
        # BUG FIX: the original only appended the error string to history and
        # then returned `response_text` unassigned → UnboundLocalError. Return
        # the same error description instead.
        response_text = f"error: stop reason - {choice['finish_reason']}"
    conversation_history[user_id]["messages"].append({"role": response_role, "content": response_text})
    return response_text
def handle_request(event):
    """Reject empty webhook bodies, otherwise delegate to the message handler."""
    print(f"event: {event}")
    if event:
        return process_synology_chat_message(event)
    return "Empty request body", 400
app = Flask(__name__)


@app.route("/webhook", methods=["POST"])
def webhook():
    """Flask endpoint for Synology Chat's outgoing webhook (URL-encoded form POST)."""
    # Flatten the form data into a plain dict and hand it to the dispatcher.
    event = dict(request.form.items())
    return handle_request(event)


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=PORT)