Skip to content

Instantly share code, notes, and snippets.

@ina111
Last active April 29, 2025 10:48
Show Gist options
  • Save ina111/f50f3c5aeb2511bada85fd15477d1943 to your computer and use it in GitHub Desktop.
Save ina111/f50f3c5aeb2511bada85fd15477d1943 to your computer and use it in GitHub Desktop.
Slack上でアプリにメンションを飛ばすとChatGPTのAPIからの返信が返ってくるコード。SlackのBoltフレームワークとOpenAIのAPIを使用。app.pyと同じフォルダに、APIのTOKENやAPI Keyを入れる.envというファイルも必要。importに使っているpython-dotenv, slack_bolt, openaiはpipなどでインストールすること。
import os
import base64
import requests
from io import BytesIO
from PIL import Image, UnidentifiedImageError
from dotenv import load_dotenv
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk import WebClient
from openai import OpenAI
# 環境変数ロード
load_dotenv()
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
SLACK_APP_TOKEN = os.environ["SLACK_APP_TOKEN"]
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
# クライアント初期化
app = App(token=SLACK_BOT_TOKEN)
client = WebClient(token=SLACK_BOT_TOKEN)
openai_client = OpenAI(api_key=OPENAI_API_KEY)
# BotユーザーID取得
bot_user_id = client.auth_test()["user_id"]
# サポートされているMIMEタイプと拡張子マップ
SUPPORTED_IMAGE_TYPES = ["image/png", "image/jpeg", "image/webp", "image/gif"]
MIMETYPE_TO_EXTENSION = {
"image/jpeg": "jpeg",
"image/png": "png",
"image/webp": "webp",
"image/gif": "gif"
}
SYS_PROMPT = """
## PERSISTENCE
You are an agent - please keep going until the user's query is completely
resolved, before ending your turn and yielding back to the user. Only
terminate your turn when you are sure that the problem is solved.
## TOOL CALLING
If you are not sure about file content or codebase structure pertaining to
the user's request, use your tools to read files and gather the relevant
information: do NOT guess or make up an answer.
## PLANNING
You MUST plan extensively before each function call, and reflect
extensively on the outcomes of the previous function calls. DO NOT do this
entire process by making function calls only, as this can impair your
ability to solve the problem and think insightfully.
## SLACK MARKDOWN FORMAT
Format text for Slack messages, use the following rules:
Use *text* for bold, _text_ for italics, ~text~ for strikethrough, `text` for inline code, and triple backticks (```text```) for code blocks; use > text for block quotes; - item or * item for unordered lists, 1. item for ordered lists; represent emojis with :emoji_name:; and always escape &, <, and > respectively to avoid rendering issues.
## Chain of Thought
First, think carefully step by step about what documents are needed to answer the query.
Then, print out the TITLE and ID of each document. Then, format the IDs into a list.
"""
# 画像リサイズ関数
def resize_image_for_openai(image_bytes):
try:
with Image.open(BytesIO(image_bytes)) as img:
width, height = img.size
short_side = min(width, height)
long_side = max(width, height)
# High-res条件に合わせる
if short_side > 768 or long_side > 2000:
if width < height:
new_width = 768
new_height = int((768 / width) * height)
else:
new_height = 768
new_width = int((768 / height) * width)
img = img.resize((min(new_width, 2000), min(new_height, 2000)), Image.Resampling.LANCZOS)
buffered = BytesIO()
img.save(buffered, format="PNG")
return buffered.getvalue()
except UnidentifiedImageError:
return None
# メンションイベント
@app.event("app_mention")
def handle_app_mention(event, say):
channel = event["channel"]
thread_ts = event.get("thread_ts") or event.get("ts")
# システムプロンプト
system_prompt = "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible."
# メッセージ履歴
messages = [{"role": "system", "content": [{"type": "input_text", "text": system_prompt}]}]
history = client.conversations_replies(channel=channel, ts=thread_ts, limit=6)
for message in history["messages"]:
if "subtype" in message and message["subtype"] == "bot_message":
continue # Slackのシステムボットメッセージは無視
content = message.get("text", "").replace(f"<@{bot_user_id}>", "").strip()
if content:
if message.get("user") == bot_user_id:
messages.append({
"role": "assistant",
"content": [{"type": "output_text", "text": content}]
})
else:
messages.append({
"role": "user",
"content": [{"type": "input_text", "text": content}]
})
# ファイルチェック
files = event.get("files", [])
content_blocks = []
user_text = event.get("text", "").replace(f"<@{bot_user_id}>", "").strip()
if user_text:
content_blocks.append({"type": "input_text", "text": user_text})
if files:
for file in files:
mimetype = file.get("mimetype", "")
if mimetype not in SUPPORTED_IMAGE_TYPES:
say(text="申し訳ありません、対応している画像形式はPNG, JPEG, WEBP, 非アニメGIFのみです。", thread_ts=thread_ts, channel=channel)
return
image_url = file["url_private_download"]
headers = {"Authorization": f"Bearer {SLACK_BOT_TOKEN}"}
response = requests.get(image_url, headers=headers)
if response.status_code == 200:
image_bytes = response.content
resized_image = resize_image_for_openai(image_bytes)
if resized_image is None:
say(text="画像の処理中にエラーが発生しました。", thread_ts=thread_ts, channel=channel)
return
image_base64 = base64.b64encode(resized_image).decode("utf-8")
# MIMEタイプから正しい拡張子を取得
extension = MIMETYPE_TO_EXTENSION.get(mimetype, "png") # fallbackにpng(ありえないが安全策)
content_blocks.append({
"type": "input_image",
"image_url": f"data:image/{extension};base64,{image_base64}"
})
if content_blocks:
messages.append({"role": "user", "content": content_blocks})
# OpenAI Responses API 呼び出し
response = openai_client.responses.create(
instructions=SYS_PROMPT,
model="gpt-4.1",
input=messages,
tools=[
{
"type": "web_search_preview",
"user_location": {
"type": "approximate",
"country": "JP"
},
"search_context_size": "medium"
}
]
)
output_text = response.output[-1].content[0].text
say(text=output_text, thread_ts=thread_ts, channel=channel)
# 翻訳リアクション対応
@app.event("reaction_added")
def handle_reaction(event, say):
reaction = event["reaction"]
channel = event["item"]["channel"]
target_ts = event["item"]["ts"] # リアクション付いたメッセージのtimestamp
if reaction not in ["to_english", "to_japanese"]:
return
system_prompt = {
"to_english": "You are a translator. Translate the following text accurately into English. Output only the translated text.",
"to_japanese": "あなたは翻訳家です。以下のテキストを正確に日本語に翻訳してください。翻訳結果のみ出力してください。"
}[reaction]
response = client.conversations_replies(channel=channel, ts=target_ts, inclusive=True, limit=1)
if not response["messages"]:
say(text="対象のメッセージが見つかりませんでした。", channel=channel)
return
target_message = response["messages"][0]
thread_ts = target_message.get("thread_ts", target_ts)
original_text = target_message.get("text", "").replace(f"<@{bot_user_id}>", "").strip()
if not original_text:
say(text="翻訳対象のテキストが見つかりませんでした。", thread_ts=thread_ts, channel=channel)
return
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": [{"type": "input_text", "text": original_text}]}
]
translation_response = openai_client.responses.create(
model="gpt-4.1",
temperature=0.2,
input=messages
)
output_text = translation_response.output[-1].content[0].text
say(text=output_text, thread_ts=thread_ts, channel=channel)
# アプリ起動
if __name__ == "__main__":
handler = SocketModeHandler(app, SLACK_APP_TOKEN)
handler.start()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment