Created
March 9, 2025 00:42
-
-
Save bossjones/09c437e1248ffceadccb28a4ba68eae1 to your computer and use it in GitHub Desktop.
Python port of the sequential-thinking MCP server: https://github.com/modelcontextprotocol/servers/blob/main/src/sequentialthinking/index.ts
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
import asyncio | |
import json | |
import sys | |
from typing import Dict, List, Any, Optional, TypedDict | |
from mcp.server import Server | |
from mcp.server.stdio import StdioServerTransport | |
from mcp.types import CallToolRequestSchema, ListToolsRequestSchema, Tool | |
# Shape of one validated "thought" record exchanged with the tool.
# total=False: in practice only the first four fields are always present;
# the Optional[...] entries carry revision/branching metadata.
ThoughtData = TypedDict(
    'ThoughtData',
    {
        'thought': str,               # the thinking-step text
        'thoughtNumber': int,         # 1-based position in the sequence
        'totalThoughts': int,         # current estimate of total steps
        'nextThoughtNeeded': bool,    # whether another step should follow
        'isRevision': Optional[bool],
        'revisesThought': Optional[int],
        'branchFromThought': Optional[int],
        'branchId': Optional[str],
        'needsMoreThoughts': Optional[bool],
    },
    total=False,
)
class SequentialThinkingServer:
    """Stateful engine behind the ``sequentialthinking`` tool.

    Keeps the full ordered history of validated thoughts plus per-branch
    groupings, renders each thought as a bordered box on stderr, and returns
    MCP-style tool results (``{'content': [...]}``, with ``'isError': True``
    on failure).
    """

    # ANSI escape codes used only for the human-readable stderr log.
    _BLUE = '\033[94m'
    _YELLOW = '\033[93m'
    _GREEN = '\033[92m'
    _RESET = '\033[0m'

    def __init__(self):
        # Every validated thought, in arrival order.
        self.thought_history: "List[ThoughtData]" = []
        # Thoughts grouped by branch id (only thoughts that declared one).
        self.branches: "Dict[str, List[ThoughtData]]" = {}

    def validate_thought_data(self, input_data: Dict[str, Any]) -> "ThoughtData":
        """Validate raw tool arguments and return them as a ThoughtData dict.

        Raises:
            ValueError: if a required field is missing, empty, or mistyped.
        """
        thought = input_data.get('thought')
        if not thought or not isinstance(thought, str):
            raise ValueError('Invalid thought: must be a string')
        thought_number = input_data.get('thoughtNumber')
        # bool is a subclass of int, so isinstance(x, int) alone would let
        # True/False slip through as "numbers"; reject bool explicitly.
        if not thought_number or isinstance(thought_number, bool) \
                or not isinstance(thought_number, int):
            raise ValueError('Invalid thoughtNumber: must be a number')
        total_thoughts = input_data.get('totalThoughts')
        if not total_thoughts or isinstance(total_thoughts, bool) \
                or not isinstance(total_thoughts, int):
            raise ValueError('Invalid totalThoughts: must be a number')
        if not isinstance(input_data.get('nextThoughtNeeded'), bool):
            raise ValueError('Invalid nextThoughtNeeded: must be a boolean')
        return {
            'thought': thought,
            'thoughtNumber': thought_number,
            'totalThoughts': total_thoughts,
            'nextThoughtNeeded': input_data['nextThoughtNeeded'],
            'isRevision': input_data.get('isRevision'),
            'revisesThought': input_data.get('revisesThought'),
            'branchFromThought': input_data.get('branchFromThought'),
            'branchId': input_data.get('branchId'),
            'needsMoreThoughts': input_data.get('needsMoreThoughts'),
        }

    def format_thought(self, thought_data: "ThoughtData") -> str:
        """Render a thought as a colorized, bordered box (for stderr logging)."""
        thought_number = thought_data['thoughtNumber']
        total_thoughts = thought_data['totalThoughts']
        thought = thought_data['thought']
        if thought_data.get('isRevision', False):
            prefix = f"{self._YELLOW}🔄 Revision{self._RESET}"
            context = f" (revising thought {thought_data.get('revisesThought')})"
        elif thought_data.get('branchFromThought'):
            prefix = f"{self._GREEN}🌿 Branch{self._RESET}"
            context = (f" (from thought {thought_data.get('branchFromThought')},"
                       f" ID: {thought_data.get('branchId')})")
        else:
            prefix = f"{self._BLUE}💭 Thought{self._RESET}"
            context = ''
        header = f"{prefix} {thought_number}/{total_thoughts}{context}"
        # The header embeds ANSI escapes that take no columns on screen, so
        # measure its visible width with the escapes stripped.
        visible_header = header
        for code in (self._BLUE, self._YELLOW, self._GREEN, self._RESET):
            visible_header = visible_header.replace(code, '')
        # Bug fix vs. the original: use one shared content width and pad BOTH
        # rows to it, so the box edges line up regardless of which of the
        # header or the thought text is longer.
        content_width = max(len(visible_header), len(thought))
        border = '─' * (content_width + 2)
        header_pad = ' ' * (content_width - len(visible_header))
        return f"""
┌{border}┐
│ {header}{header_pad} │
├{border}┤
│ {thought.ljust(content_width)} │
└{border}┘"""

    def process_thought(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Validate, record, log, and summarize one thought.

        Returns an MCP tool result whose text content is a JSON summary
        (thought counters, known branch ids, history length). Any exception
        is converted into an ``'isError': True`` result instead of
        propagating to the caller.
        """
        try:
            validated_input = self.validate_thought_data(input_data)
            # Thinking may legitimately run past the original estimate;
            # grow the estimate rather than rejecting the thought.
            if validated_input['thoughtNumber'] > validated_input['totalThoughts']:
                validated_input['totalThoughts'] = validated_input['thoughtNumber']
            self.thought_history.append(validated_input)
            if validated_input.get('branchFromThought') and validated_input.get('branchId'):
                self.branches.setdefault(validated_input['branchId'], []).append(validated_input)
            # Human-readable log goes to stderr so it can never corrupt the
            # MCP protocol stream on stdout.
            print(self.format_thought(validated_input), file=sys.stderr)
            return {
                'content': [{
                    'type': 'text',
                    'text': json.dumps({
                        'thoughtNumber': validated_input['thoughtNumber'],
                        'totalThoughts': validated_input['totalThoughts'],
                        'nextThoughtNeeded': validated_input['nextThoughtNeeded'],
                        'branches': list(self.branches.keys()),
                        'thoughtHistoryLength': len(self.thought_history)
                    }, indent=2)
                }]
            }
        except Exception as error:
            return {
                'content': [{
                    'type': 'text',
                    'text': json.dumps({
                        'error': str(error),
                        'status': 'failed'
                    }, indent=2)
                }],
                'isError': True
            }
# Define the Sequential Thinking Tool
# MCP tool descriptor advertised via list_tools. The long description is the
# prompt-facing usage guide for the calling model; inputSchema mirrors the
# fields checked by SequentialThinkingServer.validate_thought_data.
# NOTE(review): the description names parameters in snake_case
# (next_thought_needed, thought_number, ...) while the schema keys are
# camelCase (nextThoughtNeeded, ...). This mismatch is inherited from the
# TypeScript original — confirm it does not confuse callers.
SEQUENTIAL_THINKING_TOOL: Tool = {
    'name': 'sequentialthinking',
    'description': """A detailed tool for dynamic and reflective problem-solving through thoughts.
This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
Each thought can build on, question, or revise previous insights as understanding deepens.
When to use this tool:
- Breaking down complex problems into steps
- Planning and design with room for revision
- Analysis that might need course correction
- Problems where the full scope might not be clear initially
- Problems that require a multi-step solution
- Tasks that need to maintain context over multiple steps
- Situations where irrelevant information needs to be filtered out
Key features:
- You can adjust total_thoughts up or down as you progress
- You can question or revise previous thoughts
- You can add more thoughts even after reaching what seemed like the end
- You can express uncertainty and explore alternative approaches
- Not every thought needs to build linearly - you can branch or backtrack
- Generates a solution hypothesis
- Verifies the hypothesis based on the Chain of Thought steps
- Repeats the process until satisfied
- Provides a correct answer
Parameters explained:
- thought: Your current thinking step, which can include:
* Regular analytical steps
* Revisions of previous thoughts
* Questions about previous decisions
* Realizations about needing more analysis
* Changes in approach
* Hypothesis generation
* Hypothesis verification
- next_thought_needed: True if you need more thinking, even if at what seemed like the end
- thought_number: Current number in sequence (can go beyond initial total if needed)
- total_thoughts: Current estimate of thoughts needed (can be adjusted up/down)
- is_revision: A boolean indicating if this thought revises previous thinking
- revises_thought: If is_revision is true, which thought number is being reconsidered
- branch_from_thought: If branching, which thought number is the branching point
- branch_id: Identifier for the current branch (if any)
- needs_more_thoughts: If reaching end but realizing more thoughts needed
You should:
1. Start with an initial estimate of needed thoughts, but be ready to adjust
2. Feel free to question or revise previous thoughts
3. Don't hesitate to add more thoughts if needed, even at the "end"
4. Express uncertainty when present
5. Mark thoughts that revise previous thinking or branch into new paths
6. Ignore information that is irrelevant to the current step
7. Generate a solution hypothesis when appropriate
8. Verify the hypothesis based on the Chain of Thought steps
9. Repeat the process until satisfied with the solution
10. Provide a single, ideally correct answer as the final output
11. Only set next_thought_needed to false when truly done and a satisfactory answer is reached""",
    # JSON Schema for the tool arguments. The camelCase keys here are exactly
    # the keys read by validate_thought_data.
    'inputSchema': {
        'type': 'object',
        'properties': {
            'thought': {
                'type': 'string',
                'description': 'Your current thinking step'
            },
            'nextThoughtNeeded': {
                'type': 'boolean',
                'description': 'Whether another thought step is needed'
            },
            'thoughtNumber': {
                'type': 'integer',
                'description': 'Current thought number',
                'minimum': 1
            },
            'totalThoughts': {
                'type': 'integer',
                'description': 'Estimated total thoughts needed',
                'minimum': 1
            },
            # The remaining properties are optional revision/branching metadata.
            'isRevision': {
                'type': 'boolean',
                'description': 'Whether this revises previous thinking'
            },
            'revisesThought': {
                'type': 'integer',
                'description': 'Which thought is being reconsidered',
                'minimum': 1
            },
            'branchFromThought': {
                'type': 'integer',
                'description': 'Branching point thought number',
                'minimum': 1
            },
            'branchId': {
                'type': 'string',
                'description': 'Branch identifier'
            },
            'needsMoreThoughts': {
                'type': 'boolean',
                'description': 'If more thoughts are needed'
            }
        },
        'required': ['thought', 'nextThoughtNeeded', 'thoughtNumber', 'totalThoughts']
    }
}
async def run_server():
    """Create the MCP server, register its tool handlers, and serve over stdio.

    NOTE(review): this port mirrors the TypeScript MCP SDK surface —
    ``Server(info_dict, options_dict)``, ``@server.request_handler(schema)``,
    and ``StdioServerTransport``. Confirm the installed Python ``mcp``
    package actually exposes these names; the official Python SDK's server
    API differs from the TypeScript one.

    NOTE(review): the function returns immediately after ``connect``; unless
    ``server.connect`` blocks until the stdio stream closes, the enclosing
    ``asyncio.run`` will exit right away — verify the transport's semantics.
    """
    # Create server instance
    server = Server(
        {
            'name': 'sequential-thinking-server',
            'version': '0.2.0',
        },
        {
            'capabilities': {
                'tools': {},
            },
        }
    )
    # Create thinking server instance
    thinking_server = SequentialThinkingServer()
    # Register handlers
    @server.request_handler(ListToolsRequestSchema)
    async def handle_list_tools_request(request):
        """Advertise the single sequentialthinking tool."""
        return {'tools': [SEQUENTIAL_THINKING_TOOL]}
    @server.request_handler(CallToolRequestSchema)
    async def handle_call_tool_request(request):
        """Route tool calls; unknown tool names yield an isError result."""
        if request['params']['name'] == 'sequentialthinking':
            return thinking_server.process_thought(request['params']['arguments'])
        return {
            'content': [{
                'type': 'text',
                'text': f"Unknown tool: {request['params']['name']}"
            }],
            'isError': True
        }
    # Connect to transport
    transport = StdioServerTransport()
    await server.connect(transport)
    # Log to stderr: stdout carries the MCP protocol stream.
    print("Sequential Thinking MCP Server running on stdio", file=sys.stderr)
# Script entry point: run the MCP server, converting any fatal failure into
# a message on stderr and a non-zero exit code.
if __name__ == "__main__":
    try:
        asyncio.run(run_server())
    except Exception as exc:  # top-level boundary: report and exit non-zero
        sys.stderr.write(f"Fatal error running server: {exc}\n")
        sys.exit(1)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment