diff --git a/advanced-prompting-patterns/conversation_manager.py b/advanced-prompting-patterns/conversation_manager.py
new file mode 100644
index 0000000..53792d6
--- /dev/null
+++ b/advanced-prompting-patterns/conversation_manager.py
@@ -0,0 +1,126 @@
+import os
+import tiktoken
+import json
+from openai import OpenAI
+from datetime import datetime
+
+DEFAULT_API_KEY = os.environ.get("TOGETHER_API_KEY")
+DEFAULT_BASE_URL = "https://api.together.xyz/v1"
+DEFAULT_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct-Lite"
+DEFAULT_TEMPERATURE = 0.7
+DEFAULT_MAX_TOKENS = 350
+DEFAULT_TOKEN_BUDGET = 4096
+
+
+class ConversationManager:
+    """Chat-session manager for an OpenAI-compatible API that tracks
+    history, enforces a token budget, and supports persona switching."""
+
+    def __init__(self, api_key=None, base_url=None, model=None, history_file=None, temperature=None, max_tokens=None, token_budget=None):
+        if not api_key:
+            api_key = DEFAULT_API_KEY
+        if not base_url:
+            base_url = DEFAULT_BASE_URL
+
+        self.client = OpenAI(
+            api_key=api_key,
+            base_url=base_url
+        )
+        if history_file is None:
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            self.history_file = f"conversation_history_{timestamp}.json"
+        else:
+            self.history_file = history_file
+
+        self.model = model if model else DEFAULT_MODEL
+        # Explicit None checks so an intentional 0 / 0.0 is not replaced by the default.
+        self.temperature = temperature if temperature is not None else DEFAULT_TEMPERATURE
+        self.max_tokens = max_tokens if max_tokens is not None else DEFAULT_MAX_TOKENS
+        self.token_budget = token_budget if token_budget is not None else DEFAULT_TOKEN_BUDGET
+
+        self.system_messages = {
+            "blogger": "You are a creative blogger specializing in engaging and informative content for GlobalJava Roasters.",
+            "social_media_expert": "You are a social media expert, crafting catchy and shareable posts for GlobalJava Roasters.",
+            "creative_assistant": "You are a creative assistant skilled in crafting engaging marketing content for GlobalJava Roasters.",
+            "custom": "Enter your custom system message here."
+        }
+        self.system_message = self.system_messages["creative_assistant"]
+        self.conversation_history = [{"role": "system", "content": self.get_system_message()}]
+
+    def count_tokens(self, text):
+        """Return the token count of *text* for the configured model."""
+        try:
+            encoding = tiktoken.encoding_for_model(self.model)
+        except KeyError:
+            # Model unknown to tiktoken: fall back to a general-purpose encoding.
+            encoding = tiktoken.get_encoding("cl100k_base")
+
+        tokens = encoding.encode(text)
+        return len(tokens)
+
+    def total_tokens_used(self):
+        return sum(self.count_tokens(message['content']) for message in self.conversation_history)
+
+    def enforce_token_budget(self):
+        # Drop the oldest non-system messages until the history fits the
+        # budget; index 0 (the system message) is always preserved.
+        while self.total_tokens_used() > self.token_budget:
+            if len(self.conversation_history) <= 1:
+                break
+            self.conversation_history.pop(1)
+
+    def set_persona(self, persona):
+        if persona in self.system_messages:
+            self.system_message = self.system_messages[persona]
+            self.update_system_message_in_history()
+        else:
+            raise ValueError(f"Unknown persona: {persona}. Available personas are: {list(self.system_messages.keys())}")
+
+    def set_custom_system_message(self, custom_message):
+        if not custom_message:
+            raise ValueError("Custom message cannot be empty.")
+        self.system_messages['custom'] = custom_message
+        self.set_persona('custom')
+
+    def get_system_message(self):
+        system_message = self.system_message
+        # // keeps the limit an integer ("/" would render "175.0" in the prompt).
+        system_message += f"\nImportant: Tailor your response to fit within {DEFAULT_MAX_TOKENS // 2} word limit\n"
+        return system_message
+
+    def update_system_message_in_history(self):
+        if self.conversation_history and self.conversation_history[0]["role"] == "system":
+            self.conversation_history[0]["content"] = self.get_system_message()
+        else:
+            # No system message yet: prepend a freshly built one.
+            self.conversation_history.insert(0, {"role": "system", "content": self.get_system_message()})
+
+    def chat_completion(self, prompt, temperature=None, max_tokens=None):
+        temperature = temperature if temperature is not None else self.temperature
+        max_tokens = max_tokens if max_tokens is not None else self.max_tokens
+
+        self.conversation_history.append({"role": "user", "content": prompt})
+
+        self.enforce_token_budget()
+
+        try:
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=self.conversation_history,
+                temperature=temperature,
+                max_tokens=max_tokens,
+            )
+        except Exception as e:
+            print(f"An error occurred while generating a response: {e}")
+            return None
+
+        ai_response = response.choices[0].message.content
+        self.conversation_history.append({"role": "assistant", "content": ai_response})
+
+        return ai_response
+
+    def reset_conversation_history(self):
+        # Fixed: was `self.self.get_system_message()` (AttributeError at runtime).
+        self.conversation_history = [{"role": "system", "content": self.get_system_message()}]
+
+
diff --git a/advanced-prompting-patterns/solution-files/campaign_generator.py b/advanced-prompting-patterns/solution-files/campaign_generator.py
new file mode 100644
index 0000000..b70549f
--- /dev/null
+++ b/advanced-prompting-patterns/solution-files/campaign_generator.py
@@ -0,0 +1,183 @@
+from conversation_manager import ConversationManager
+from prompt_templates import build_campaign_prompt, build_extract_prompt, build_draft_prompt
+from models import CampaignBrief, ExtractedProductInfo
+from utils import validate_json_output
+
+TEMPERATURE = 0.2
+
+def generate_campaign_brief(factsheet, max_retries=3):
+    """Generate a validated campaign brief with automatic repair."""
+    conversation = ConversationManager()
+
+    initial_prompt = build_campaign_prompt(factsheet)
+    response = conversation.chat_completion(initial_prompt, temperature=TEMPERATURE)
+
+    print("Initial response:")
+    print(response)
+    print()
+
+    success, result = validate_json_output(response, CampaignBrief)
+
+    if success:
+        print("✓ Valid on first attempt!")
+        return result
+
+    print(f"✗ Validation failed: {result}")
+    print()
+
+    retries = 0
+    last_error = result
+
+    while retries < max_retries:
+        print(f"Attempting repair {retries + 1}/{max_retries}...")
+
+        repair_prompt = f"""
+The JSON you provided had validation errors:
+
+{last_error}
+
+Please provide corrected JSON that fixes these errors. Remember:
+- campaign_goal must be exactly "awareness", "engagement", or "conversion"
+- All required fields must be present: campaign_name, target_audience, key_message, campaign_goal, call_to_action, channel_recommendations
+- channel_recommendations must be a list of strings
+
+Respond with valid JSON only. Keep each string value to 1–2 sentences max.
+"""
+
+        response = conversation.chat_completion(repair_prompt, temperature=TEMPERATURE)
+
+        print("Repair response:")
+        print(response)
+        print()
+
+        success, result = validate_json_output(response, CampaignBrief)
+
+        if success:
+            print("✓ Repair successful!")
+            return result
+
+        last_error = result
+        print(f"✗ Still invalid: {last_error}")
+        print()
+        retries += 1
+
+    raise ValueError(
+        f"Could not generate valid campaign brief after {max_retries} attempts. Last error: {last_error}"
+    )
+
+
+def extract_product_info(factsheet, max_retries=3):
+    """Extract structured information from product factsheet."""
+    conversation = ConversationManager()
+
+    prompt = build_extract_prompt(factsheet)
+    response = conversation.chat_completion(prompt, temperature=TEMPERATURE)
+
+    success, result = validate_json_output(response, ExtractedProductInfo)
+    if success:
+        return result
+
+    retries = 0
+    last_error = result
+
+    while retries < max_retries:
+        repair_prompt = f"""
+The JSON had errors:
+
+{last_error}
+
+Provide corrected JSON matching the required structure.
+Respond with JSON only. Keep each string value to 1–2 sentences max.
+Use [] for scarcity_factors if none.
+"""
+        response = conversation.chat_completion(repair_prompt, temperature=TEMPERATURE)
+
+        success, result = validate_json_output(response, ExtractedProductInfo)
+        if success:
+            return result
+
+        last_error = result
+        retries += 1
+
+    raise ValueError(f"Could not extract product info after {max_retries} attempts. Last error: {last_error}")
+
+
+def generate_campaign_brief_pipeline(factsheet, max_retries=3):
+    """Generate campaign brief using multi-step pipeline."""
+    print("Step 1: Extracting product information...")
+    extracted = extract_product_info(factsheet, max_retries)
+    print(f"✓ Extracted info for: {extracted.product_name}")
+
+    print("Step 2: Drafting campaign brief...")
+    conversation = ConversationManager()
+
+    prompt = build_draft_prompt(extracted)
+    response = conversation.chat_completion(prompt, temperature=TEMPERATURE)
+
+    print("Initial campaign brief response:")
+    print(response)
+    print()
+
+    success, result = validate_json_output(response, CampaignBrief)
+    if success:
+        print("✓ Valid campaign brief generated")
+        return result
+
+    print("Step 3: Repairing output...")
+    retries = 0
+    last_error = result
+
+    while retries < max_retries:
+        repair_prompt = f"""
+The JSON had validation errors:
+
+{last_error}
+
+Provide corrected JSON. Remember:
+- campaign_goal must be "awareness", "engagement", or "conversion"
+- All required fields must be present
+- channel_recommendations must be a list of strings
+
+Respond with JSON only. Keep each string value to 1–2 sentences max.
+"""
+        response = conversation.chat_completion(repair_prompt, temperature=TEMPERATURE)
+
+        success, result = validate_json_output(response, CampaignBrief)
+        if success:
+            print("✓ Repair successful")
+            return result
+
+        last_error = result
+        retries += 1
+
+    raise ValueError(f"Could not generate valid brief after {max_retries} repair attempts. Last error: {last_error}")
+
+
+if __name__ == "__main__":
+    factsheet = """
+Product: Limited Edition Geisha Reserve
+Origin: Hacienda La Esmeralda, Panama
+Altitude: 1,600-1,800 meters
+Processing: Natural, 72-hour fermentation
+Flavor Profile: Jasmine, bergamot, white peach, honey sweetness,
+silky body, complex finish with hints of tropical fruit
+Certifications: Single Estate, Competition Grade
+Limited Production: Only 500 bags produced this season
+Story: This micro-lot scored 94.1 points in the 2024 Cup of Excellence
+competition. The beans come from 30-year-old Geisha trees grown in
+volcanic soil. The extended fermentation process was developed
+specifically for this lot to enhance the floral characteristics.
+Price: $89.99/bag
+Previous Customer Feedback: "Best coffee I've ever tasted" - Coffee
+Review Magazine. Sold out in 3 days last year.
+""".strip()
+
+    try:
+        brief = generate_campaign_brief_pipeline(factsheet)
+        print("✓ Generated valid campaign brief!")
+        print(f"Campaign: {brief.campaign_name}")
+        print(f"Target: {brief.target_audience}")
+        print(f"Goal: {brief.campaign_goal.value}")
+        print(f"Channels: {', '.join(brief.channel_recommendations)}")
+    except ValueError as e:
+        print(f"✗ Failed to generate brief: {e}")
diff --git a/advanced-prompting-patterns/solution-files/conversation_manager.py b/advanced-prompting-patterns/solution-files/conversation_manager.py
new file mode 100644
index 0000000..53792d6
--- /dev/null
+++ b/advanced-prompting-patterns/solution-files/conversation_manager.py
@@ -0,0 +1,126 @@
+import os
+import tiktoken
+import json
+from openai import OpenAI
+from datetime import datetime
+
+DEFAULT_API_KEY = os.environ.get("TOGETHER_API_KEY")
+DEFAULT_BASE_URL = "https://api.together.xyz/v1"
+DEFAULT_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct-Lite"
+DEFAULT_TEMPERATURE = 0.7
+DEFAULT_MAX_TOKENS = 350
+DEFAULT_TOKEN_BUDGET = 4096
+
+
+class ConversationManager:
+    """Chat-session manager for an OpenAI-compatible API that tracks
+    history, enforces a token budget, and supports persona switching."""
+
+    def __init__(self, api_key=None, base_url=None, model=None, history_file=None, temperature=None, max_tokens=None, token_budget=None):
+        if not api_key:
+            api_key = DEFAULT_API_KEY
+        if not base_url:
+            base_url = DEFAULT_BASE_URL
+
+        self.client = OpenAI(
+            api_key=api_key,
+            base_url=base_url
+        )
+        if history_file is None:
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            self.history_file = f"conversation_history_{timestamp}.json"
+        else:
+            self.history_file = history_file
+
+        self.model = model if model else DEFAULT_MODEL
+        # Explicit None checks so an intentional 0 / 0.0 is not replaced by the default.
+        self.temperature = temperature if temperature is not None else DEFAULT_TEMPERATURE
+        self.max_tokens = max_tokens if max_tokens is not None else DEFAULT_MAX_TOKENS
+        self.token_budget = token_budget if token_budget is not None else DEFAULT_TOKEN_BUDGET
+
+        self.system_messages = {
+            "blogger": "You are a creative blogger specializing in engaging and informative content for GlobalJava Roasters.",
+            "social_media_expert": "You are a social media expert, crafting catchy and shareable posts for GlobalJava Roasters.",
+            "creative_assistant": "You are a creative assistant skilled in crafting engaging marketing content for GlobalJava Roasters.",
+            "custom": "Enter your custom system message here."
+        }
+        self.system_message = self.system_messages["creative_assistant"]
+        self.conversation_history = [{"role": "system", "content": self.get_system_message()}]
+
+    def count_tokens(self, text):
+        """Return the token count of *text* for the configured model."""
+        try:
+            encoding = tiktoken.encoding_for_model(self.model)
+        except KeyError:
+            # Model unknown to tiktoken: fall back to a general-purpose encoding.
+            encoding = tiktoken.get_encoding("cl100k_base")
+
+        tokens = encoding.encode(text)
+        return len(tokens)
+
+    def total_tokens_used(self):
+        return sum(self.count_tokens(message['content']) for message in self.conversation_history)
+
+    def enforce_token_budget(self):
+        # Drop the oldest non-system messages until the history fits the
+        # budget; index 0 (the system message) is always preserved.
+        while self.total_tokens_used() > self.token_budget:
+            if len(self.conversation_history) <= 1:
+                break
+            self.conversation_history.pop(1)
+
+    def set_persona(self, persona):
+        if persona in self.system_messages:
+            self.system_message = self.system_messages[persona]
+            self.update_system_message_in_history()
+        else:
+            raise ValueError(f"Unknown persona: {persona}. Available personas are: {list(self.system_messages.keys())}")
+
+    def set_custom_system_message(self, custom_message):
+        if not custom_message:
+            raise ValueError("Custom message cannot be empty.")
+        self.system_messages['custom'] = custom_message
+        self.set_persona('custom')
+
+    def get_system_message(self):
+        system_message = self.system_message
+        # // keeps the limit an integer ("/" would render "175.0" in the prompt).
+        system_message += f"\nImportant: Tailor your response to fit within {DEFAULT_MAX_TOKENS // 2} word limit\n"
+        return system_message
+
+    def update_system_message_in_history(self):
+        if self.conversation_history and self.conversation_history[0]["role"] == "system":
+            self.conversation_history[0]["content"] = self.get_system_message()
+        else:
+            # No system message yet: prepend a freshly built one.
+            self.conversation_history.insert(0, {"role": "system", "content": self.get_system_message()})
+
+    def chat_completion(self, prompt, temperature=None, max_tokens=None):
+        temperature = temperature if temperature is not None else self.temperature
+        max_tokens = max_tokens if max_tokens is not None else self.max_tokens
+
+        self.conversation_history.append({"role": "user", "content": prompt})
+
+        self.enforce_token_budget()
+
+        try:
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=self.conversation_history,
+                temperature=temperature,
+                max_tokens=max_tokens,
+            )
+        except Exception as e:
+            print(f"An error occurred while generating a response: {e}")
+            return None
+
+        ai_response = response.choices[0].message.content
+        self.conversation_history.append({"role": "assistant", "content": ai_response})
+
+        return ai_response
+
+    def reset_conversation_history(self):
+        # Fixed: was `self.self.get_system_message()` (AttributeError at runtime).
+        self.conversation_history = [{"role": "system", "content": self.get_system_message()}]
+
+
diff --git a/advanced-prompting-patterns/solution-files/models.py b/advanced-prompting-patterns/solution-files/models.py
new file mode 100644
index 0000000..bd02ff8
--- /dev/null
+++ b/advanced-prompting-patterns/solution-files/models.py
@@ -0,0 +1,27 @@
+from pydantic import BaseModel, ConfigDict
+from typing import List
+from enum import Enum
+
+class CampaignGoal(str, Enum):
+    AWARENESS = "awareness"
+    ENGAGEMENT = "engagement"
+    CONVERSION = "conversion"
+
+class CampaignBrief(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+    campaign_name: str
+    target_audience: str
+    key_message: str
+    campaign_goal: CampaignGoal
+    call_to_action: str
+    channel_recommendations: List[str]
+
+class ExtractedProductInfo(BaseModel):
+    model_config = ConfigDict(extra="forbid")
+    product_name: str
+    origin_story: str
+    unique_features: List[str]
+    flavor_highlights: List[str]
+    certifications: List[str]
+    price_point: str
+    scarcity_factors: List[str] = []
diff --git a/advanced-prompting-patterns/solution-files/prompt_templates.py b/advanced-prompting-patterns/solution-files/prompt_templates.py
new file mode 100644
index 0000000..313bab3
--- /dev/null
+++ b/advanced-prompting-patterns/solution-files/prompt_templates.py
@@ -0,0 +1,139 @@
+CAMPAIGN_SYSTEM_V2 = """
+PROMPT_ID: campaign_brief_generator
+VERSION: 2.0
+LAST_UPDATED: 2026-01-08
+CHANGELOG:
+- v2.0: Added specific channel guidance for premium products
+- v1.1: Clarified target audience requirements
+- v1.0: Initial version
+
+You are a marketing campaign strategist for GlobalJava Roasters,
+a premium coffee company focused on quality and sustainability.
+"""
+
+CAMPAIGN_SYSTEM = """
+You are a marketing campaign strategist for GlobalJava Roasters,
+a premium coffee company focused on quality and sustainability.
+"""
+
+CAMPAIGN_TASK = """
+Create a marketing campaign brief for the product described below.
+"""
+
+CAMPAIGN_CONSTRAINTS = """
+Follow these rules:
+- Base all claims on the provided product information
+- Use professional but engaging language appropriate for coffee enthusiasts
+- Focus on the product's unique characteristics and value proposition
+- Recommend marketing channels suitable for premium coffee consumers
+"""
+
+CAMPAIGN_OUTPUT = """
+Output your response as valid JSON with this structure:
+
+{
+  "campaign_name": "string",
+  "target_audience": "string",
+  "key_message": "string",
+  "campaign_goal": "awareness" | "engagement" | "conversion",
+  "call_to_action": "string",
+  "channel_recommendations": ["string", "string", ...]
+}
+
+Respond with JSON only. No explanations or markdown. Keep each string value to 1–2 sentences max.
+"""
+
+def build_campaign_prompt(factsheet):
+    """Build a campaign brief prompt from template blocks."""
+    reference = f"Product Information:\n{factsheet}"
+
+    return f"""
+{CAMPAIGN_SYSTEM}
+
+{CAMPAIGN_TASK}
+
+{CAMPAIGN_CONSTRAINTS}
+
+{reference}
+
+{CAMPAIGN_OUTPUT}
+""".strip()
+
+EXTRACT_SYSTEM = "You are a data extraction specialist."
+
+EXTRACT_TASK = """
+Extract key marketing-relevant information from the product factsheet below.
+Focus on facts that would matter for a marketing campaign.
+"""
+
+EXTRACT_OUTPUT = """
+Output JSON with this structure:
+{
+  "product_name": "string",
+  "origin_story": "string",
+  "unique_features": ["string", "string", ...],
+  "flavor_highlights": ["string", "string", ...],
+  "certifications": ["string", "string", ...],
+  "price_point": "budget" | "mid-range" | "premium" | "luxury",
+  "scarcity_factors": ["string", ...] // use [] if none
+}
+
+Respond with JSON only.
+"""
+
+def build_extract_prompt(factsheet):
+    reference = f"Product Factsheet:\n{factsheet}"
+    return f"""
+{EXTRACT_SYSTEM}

+{EXTRACT_TASK}
+
+{reference}
+
+{EXTRACT_OUTPUT}
+""".strip()
+
+
+DRAFT_CAMPAIGN_SYSTEM = """
+You are a marketing campaign strategist for GlobalJava Roasters.
+"""
+
+DRAFT_CAMPAIGN_TASK = """
+Create a compelling marketing campaign brief using the extracted
+product information provided below.
+"""
+
+DRAFT_CAMPAIGN_CONSTRAINTS = """
+Guidelines:
+- Craft a campaign name that captures the product's essence
+- Target audience should reflect the price point and product characteristics
+- Key message should highlight the most compelling unique features
+- Choose an appropriate campaign goal based on product positioning
+- Create a clear, actionable call-to-action
+- Recommend channels that reach premium coffee enthusiasts
+"""
+
+def build_draft_prompt(extracted_info):
+    # Format the extracted info nicely
+    info_text = f"""
+Product: {extracted_info.product_name}
+Origin Story: {extracted_info.origin_story}
+Unique Features: {', '.join(extracted_info.unique_features)}
+Flavor Highlights: {', '.join(extracted_info.flavor_highlights)}
+Price Point: {extracted_info.price_point}
+"""
+    if extracted_info.scarcity_factors:
+        info_text += f"Scarcity: {', '.join(extracted_info.scarcity_factors)}\n"
+
+    return f"""
+{DRAFT_CAMPAIGN_SYSTEM}
+
+{DRAFT_CAMPAIGN_TASK}
+
+{DRAFT_CAMPAIGN_CONSTRAINTS}
+
+Extracted Product Information:
+{info_text}
+
+{CAMPAIGN_OUTPUT}
+""".strip()
\ No newline at end of file
diff --git a/advanced-prompting-patterns/solution-files/test_prompts.py b/advanced-prompting-patterns/solution-files/test_prompts.py
new file mode 100644
index 0000000..ee61373
--- /dev/null
+++ b/advanced-prompting-patterns/solution-files/test_prompts.py
@@ -0,0 +1,48 @@
+from campaign_generator import generate_campaign_brief
+from models import CampaignGoal
+
+def test_basic_generation():
+    """Can we generate a valid brief at all?"""
+    factsheet = """
+    Product: House Blend Medium Roast
+    Origin: Colombia and Brazil blend
+    Flavor: Balanced, chocolatey, nutty
+    Price: $12.99/bag
+    """
+
+    brief = generate_campaign_brief(factsheet)
+
+    assert brief.campaign_name, "Campaign name missing"
+    assert len(brief.campaign_name) >= 5, "Campaign name too short"
+    assert brief.target_audience, "Target audience missing"
+    assert brief.campaign_goal in CampaignGoal, "Invalid campaign goal"
+    assert len(brief.channel_recommendations) > 0, "No channels recommended"
+
+    print("✓ Basic generation test passed")
+
+def test_premium_product_targeting():
+    """Premium products should target sophisticated audiences."""
+    factsheet = """
+    Product: Single Origin Ethiopian Yirgacheffe
+    Origin: Gedeb region, Ethiopia
+    Processing: Washed
+    Flavor: Floral, citrus, tea-like
+    Certifications: Organic, Fair Trade
+    Price: $24.99/bag
+    """
+
+    brief = generate_campaign_brief(factsheet)
+
+    target = brief.target_audience.lower()
+    sophisticated_terms = ['enthusiast', 'connoisseur', 'specialty', 'premium', 'aficionado']
+
+    assert any(term in target for term in sophisticated_terms), \
+        f"Premium product should target sophisticated audience, got: {brief.target_audience}"
+
+    print("✓ Premium targeting test passed")
+
+if __name__ == "__main__":
+    print("Running prompt tests...\n")
+    test_basic_generation()
+    test_premium_product_targeting()
+    print("\n✓ All tests passed!")
diff --git a/advanced-prompting-patterns/solution-files/utils.py b/advanced-prompting-patterns/solution-files/utils.py
new file mode 100644
index 0000000..d6132fb
--- /dev/null
+++ b/advanced-prompting-patterns/solution-files/utils.py
@@ -0,0 +1,26 @@
+import json
+from pydantic import ValidationError
+
+def validate_json_output(response_text, model_class):
+    """
+    Parse JSON from LLM response and validate against Pydantic model.
+    Returns (success, result_or_errors)
+    """
+    try:
+        # Extract JSON (handles markdown code blocks if present)
+        json_text = response_text.strip()
+        if json_text.startswith("```json"):
+            json_text = json_text.split("```json")[1].split("```")[0].strip()
+        elif json_text.startswith("```"):
+            json_text = json_text.split("```")[1].split("```")[0].strip()
+
+        # Parse and validate
+        data = json.loads(json_text)
+        validated = model_class(**data)
+        return True, validated
+
+    except json.JSONDecodeError as e:
+        return False, f"JSON parsing error: {str(e)}"
+
+    except ValidationError as e:
+        return False, f"Validation errors: {e.errors()}"
\ No newline at end of file