Created
December 22, 2024 10:04
-
-
Save fr0gger/4414cc4d8656d2891547ee3393cd124b to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Prompt Gen Lite
# Author: Thomas Roccia | @fr0gger_
import argparse | |
from openai import OpenAI | |
import anthropic | |
from ollama import Client | |
import requests | |
import os | |
# API keys are read once at import time from the environment; either may be
# None if the corresponding variable is unset (the client call will then fail).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
def connect_to_openai(prompt, model="gpt-4o-mini"):
    """Send *prompt* to the OpenAI chat completions API and return the reply.

    Args:
        prompt: The user message to send.
        model: OpenAI model identifier (default "gpt-4o-mini").

    Returns:
        The assistant's reply text (str).
    """
    # Use the module-level OPENAI_API_KEY constant for consistency with
    # connect_to_anthropic; the original re-read os.environ here even though
    # the key was already captured at import time.
    client = OpenAI(api_key=OPENAI_API_KEY)
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model=model,
    )
    return chat_completion.choices[0].message.content
def connect_to_anthropic(prompt, model="claude-3-5-sonnet-20241022"):
    """Send *prompt* to the Anthropic Messages API and return the reply text."""
    client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
    response = client.messages.create(
        model=model,
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    )
    # The Messages API returns a list of content blocks; take the first text block.
    return response.content[0].text
def connect_to_ollama(prompt, model="llama3.2"):
    """Send *prompt* to a local Ollama server and return the reply text.

    Args:
        prompt: The user message to send.
        model: Local model name (default "llama3.2").

    Returns:
        The model's reply text (str).
    """
    # Dropped the original headers={'x-some-header': 'value-123'} argument:
    # it was a placeholder copied from the ollama client docs and served no purpose.
    client = Client(host='http://localhost:11434')
    response = client.chat(
        model=model,
        messages=[
            {
                'role': 'user',
                'content': prompt,
            },
        ],
    )
    return response.message.content
def generate_prompt(prompt_type, topic):
    """Build a prompt-engineering template for *topic*.

    Args:
        prompt_type: One of "few_shot_learning", "chain_of_thought",
            "tree_of_thought", or "self_consistency".
        topic: The user's task description, substituted into the template.

    Returns:
        The formatted template string ending with "<topic>: ".

    Raises:
        ValueError: If *prompt_type* is not a known technique.
    """
    templates = {
        "few_shot_learning": (
            "Few-shot prompting is ideal for cybersecurity tasks like threat analysis and classification. Follow this structure but tailor the examples to the specific topic provided by the user.\n\n"
            "Example Prompt:\n"
            "Task: Analyze network logs for signs of a DDoS attack.\n"
            "Example 1: Log entry '192.168.1.1 sent 1000 requests in 1 second'.\n"
            "Example 2: Log entry '10.0.0.2 attempted connection to port 22 multiple times in a short interval'.\n"
            "This structure serves as a guideline; adjust the task and examples to fit the entered topic.\n"
            "{topic}: "
        ),
        "chain_of_thought": (
            "Using chain-of-thought reasoning, follow this structured example approach for a cybersecurity task. Tailor each step specifically to the given topic:\n\n"
            "Task: Analyze a suspicious email attachment:\n"
            "Step 1: Extract metadata from the attachment (e.g., file type, size, hash values).\n"
            "Step 2: Cross-reference the hash with threat intelligence databases.\n"
            "Step 3: Decompile the attachment to inspect for obfuscated code or unusual behavior.\n"
            "Step 4: Simulate execution in a sandbox environment to observe behavior.\n"
            "Step 5: Summarize findings and classify the threat.\n"
            "This structure serves as an example. Each step should be tailored to match the specific topic provided by the user.\n"
            "{topic}: "
        ),
        "tree_of_thought": (
            "Leverage the Tree-of-Thought (ToT) framework for complex cybersecurity tasks requiring exploration or strategic lookahead. Follow this example structure but adapt the tree's nodes and thought progression to the specific topic:\n\n"
            "Investigate a potential Advanced Persistent Threat (APT):\n"
            "- Root Thought: Analyze the overall network activity.\n"
            "  - Thought 1: Identify unusual login patterns or IP addresses.\n"
            "    - Subthought 1.1: Correlate IP addresses with known threat actor databases.\n"
            "    - Subthought 1.2: Assess login times for anomalies.\n"
            "  - Thought 2: Monitor data exfiltration attempts.\n"
            "    - Subthought 2.1: Flag unusually large data transfers.\n"
            "    - Subthought 2.2: Identify unauthorized access to sensitive files.\n"
            "- Use depth-first or breadth-first exploration to expand and evaluate the tree of thoughts.\n"
            "This structure illustrates ToT principles; customize the tree's depth and breadth based on the provided topic.\n"
            "{topic}: "
        ),
        "self_consistency": (
            "Apply self-consistency to improve reasoning by sampling multiple diverse reasoning paths and selecting the most consistent answer. Tailor the examples and reasoning paths to the specific topic:\n\n"
            "Determine if a suspicious login attempt is valid:\n"
            "1. Path 1: Check the IP address against known threat databases.\n"
            "   - Reasoning: The IP is flagged as suspicious in 2 databases, likely invalid.\n"
            "2. Path 2: Assess the login time compared to user activity patterns.\n"
            "   - Reasoning: The login time is outside normal hours for this user, likely invalid.\n"
            "3. Path 3: Check for unusual session duration or access patterns.\n"
            "   - Reasoning: Session duration is consistent with valid user behavior, possibly valid.\n"
            "Aggregate the reasoning to determine the final outcome: Likely invalid.\n"
            "This technique ensures consistency across multiple evaluations and improves accuracy for complex topics.\n"
            "{topic}: "
        ),
    }
    # Validate before formatting so the caller gets a clear error listing
    # every supported technique.
    if prompt_type not in templates:
        raise ValueError(f"Invalid prompt type: {prompt_type}. Choose from: {', '.join(templates.keys())}")
    chosen = templates[prompt_type]
    return chosen.format(topic=topic)
def main():
    """CLI entry point: build a technique-specific prompt and send it to a backend."""
    parser = argparse.ArgumentParser(description="CLI tool for generating optimized prompts.")
    # Fixed: the help text previously misspelled the technique as
    # "three_of_thought"; the implemented key is "tree_of_thought".
    parser.add_argument("prompt_type", type=str, help="Prompt technique (e.g., few_shot_learning, chain_of_thought, tree_of_thought, self_consistency)")
    parser.add_argument("topic", type=str, help="Description of the task the prompt is for")
    # Fixed: "grok" was advertised but never implemented; only these three
    # backends have connect_* helpers.
    parser.add_argument("model", type=str, help="Target model to use (e.g., openai, anthropic, ollama)")
    args = parser.parse_args()

    # Dispatch table keeps backend selection in one place.
    backends = {
        "openai": connect_to_openai,
        "anthropic": connect_to_anthropic,
        "ollama": connect_to_ollama,
    }
    try:
        user_prompt = generate_prompt(args.prompt_type, args.topic)
        if args.model not in backends:
            raise ValueError(f"Unsupported model: {args.model}. Choose from: {', '.join(backends)}.")
        output = backends[args.model](user_prompt)
        print("\nGenerated Prompt:\n")
        print(output)
    except Exception as e:
        # Best-effort CLI: report the error message instead of a traceback.
        print(f"Error: {e}")
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment