import murnitur
import anthropic
from openai import OpenAI


def generate_yoda_response():
    # Ask Claude to answer a simple question, constrained to Yoda-speak
    # via the system prompt.
    client = anthropic.Anthropic()
    message = client.messages.create(
        model="claude-3-opus-20240229",
        max_tokens=1000,
        temperature=0.0,
        system="Respond only in Yoda-speak.",
        messages=[
            {"role": "user", "content": "How are you today?"}
        ],
    )
    # The Anthropic SDK returns a list of content blocks; take the text
    # of the first one.
    return message.content[0].text


@murnitur.trace
def detect_yoda_response():
    # Traced by Murnitur: generate a Yoda-speak reply with Claude, then
    # ask GPT-3.5 Turbo to judge whether the reply is actually Yoda-speak.
    client = OpenAI()
    yoda_response = generate_yoda_response()
    result = client.chat.completions.create(
        model="gpt-3.5-turbo",
        temperature=1,
        messages=[
            {
                "role": "system",
                "content": "Detect if the response was in Yoda-speak.",
            },
            {"role": "user", "content": f"Response: {yoda_response}"},
        ],
        max_tokens=300,
    )
    return result.choices[0].message.content


if __name__ == "__main__":
    # Configure Murnitur (API key, project name, environment) before
    # calling the traced function.
    murnitur.set_api_key("mt-ey...")
    murnitur.init("murnix-trace", murnitur.Environment.DEVELOPMENT)
    print(detect_yoda_response())