-
Notifications
You must be signed in to change notification settings - Fork 174
Expand file tree
/
Copy pathagentframework_workflow.py
More file actions
183 lines (155 loc) · 6.26 KB
/
agentframework_workflow.py
File metadata and controls
183 lines (155 loc) · 6.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
# pip install agent-framework-devui==1.0.0b260212
import os
from typing import Any
from agent_framework import AgentExecutorResponse, ChatAgent, WorkflowBuilder
from agent_framework.openai import OpenAIChatClient
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from dotenv import load_dotenv
from pydantic import BaseModel
# Configure OpenAI client based on environment
load_dotenv(override=True)  # .env values take precedence over already-set env vars
API_HOST = os.getenv("API_HOST", "github")  # "azure" | "github" | "ollama" | anything else -> openai.com

if API_HOST == "azure":
    # Keyless auth: exchange an Entra ID credential for a bearer token provider
    # scoped to Azure Cognitive Services.
    async_credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
    client = OpenAIChatClient(
        base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/",
        # NOTE(review): a token-provider *callable* is passed where an API key is
        # expected — presumably OpenAIChatClient accepts callables here; confirm.
        api_key=token_provider,
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
    )
elif API_HOST == "github":
    # GitHub Models inference endpoint, authenticated with a GitHub token.
    client = OpenAIChatClient(
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
        model_id=os.getenv("GITHUB_MODEL", "openai/gpt-4o"),
    )
elif API_HOST == "ollama":
    # Local Ollama server speaking the OpenAI-compatible API; the key is unused.
    client = OpenAIChatClient(
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key="none",
        model_id=os.environ.get("OLLAMA_MODEL", "llama3.1:latest"),
    )
else:
    # Default: openai.com with a standard API key.
    client = OpenAIChatClient(api_key=os.environ["OPENAI_API_KEY"], model_id=os.environ.get("OPENAI_MODEL", "gpt-4o"))
# Define structured output for review results
class ReviewResult(BaseModel):
    """Review evaluation with scores and feedback.

    Emitted by the Reviewer agent as structured JSON (via ``response_format``)
    and parsed by the routing conditions to decide whether content goes to the
    Editor (score < 80) or straight to the Publisher (score >= 80).
    """

    score: int  # Overall quality score (0-100); routing threshold is 80
    feedback: str  # Concise, actionable feedback
    clarity: int  # Clarity score (0-100)
    completeness: int  # Completeness score (0-100)
    accuracy: int  # Accuracy score (0-100)
    structure: int  # Structure score (0-100)
# Condition function: route to editor if score < 80
def needs_editing(message: Any) -> bool:
    """Return True when the reviewer scored the content below 80.

    Messages that are not an AgentExecutorResponse, or whose text cannot be
    parsed as a ReviewResult, are treated as not needing edits.
    """
    if not isinstance(message, AgentExecutorResponse):
        return False
    try:
        parsed = ReviewResult.model_validate_json(message.agent_response.text)
    except Exception:
        # Unparseable review output: don't route to the editor.
        return False
    return parsed.score < 80
# Condition function: content is approved (score >= 80)
def is_approved(message: Any) -> bool:
    """Return True when the content can skip editing (review score >= 80).

    Exact complement of ``needs_editing``: anything that is not an
    AgentExecutorResponse, or that fails to parse, defaults to approved.
    """
    if not isinstance(message, AgentExecutorResponse):
        return True
    try:
        parsed = ReviewResult.model_validate_json(message.agent_response.text)
    except Exception:
        # Unparseable review output defaults to the approval branch.
        return True
    return parsed.score >= 80
# Create Writer agent - generates content
def create_writer():
    """Build the Writer agent, which drafts content from the user's request."""
    writer_instructions = (
        "You are an excellent content writer. "
        "Create clear, engaging content based on the user's request. "
        "Focus on clarity, accuracy, and proper structure."
    )
    return client.as_agent(name="Writer", instructions=writer_instructions)
# Create Reviewer agent - evaluates and provides structured feedback
def create_reviewer():
    """Build the Reviewer agent; returns structured ReviewResult JSON."""
    reviewer_instructions = (
        "You are an expert content reviewer. "
        "Evaluate the writer's content based on:\n"
        "1. Clarity - Is it easy to understand?\n"
        "2. Completeness - Does it fully address the topic?\n"
        "3. Accuracy - Is the information correct?\n"
        "4. Structure - Is it well-organized?\n\n"
        "Return a JSON object with:\n"
        "- score: overall quality (0-100)\n"
        "- feedback: concise, actionable feedback\n"
        "- clarity, completeness, accuracy, structure: individual scores (0-100)"
    )
    # response_format makes the model emit JSON matching the ReviewResult schema.
    return client.as_agent(
        name="Reviewer",
        instructions=reviewer_instructions,
        response_format=ReviewResult,
    )
# Create Editor agent - improves content based on feedback
def create_editor():
    """Build the Editor agent, which revises content using review feedback."""
    editor_instructions = (
        "You are a skilled editor. "
        "You will receive content along with review feedback. "
        "Improve the content by addressing all the issues mentioned in the feedback. "
        "Maintain the original intent while enhancing clarity, completeness, accuracy, and structure."
    )
    return client.as_agent(name="Editor", instructions=editor_instructions)
# Create Publisher agent - formats content for publication
def create_publisher():
    """Build the Publisher agent, which formats content for publication."""
    publisher_instructions = (
        "You are a publishing agent. "
        "You receive either approved content or edited content. "
        "Format it for publication with proper headings and structure."
    )
    return client.as_agent(name="Publisher", instructions=publisher_instructions)
# Create Summarizer agent - creates final publication report
def create_summarizer():
    """Build the Summarizer agent, which writes the final publication report."""
    summarizer_instructions = (
        "You are a summarizer agent. "
        "Create a final publication report that includes:\n"
        "1. A brief summary of the published content\n"
        "2. The workflow path taken (direct approval or edited)\n"
        "3. Key highlights and takeaways\n"
        "Keep it concise and professional."
    )
    return client.as_agent(name="Summarizer", instructions=summarizer_instructions)
# Build workflow with branching and convergence:
# Writer → Reviewer → [branches]:
#   - score >= 80: → Publisher → Summarizer (direct approval path)
#   - score < 80:  → Editor → Publisher → Summarizer (improvement path)
# Both paths converge at Summarizer for the final report.
writer = create_writer()
reviewer = create_reviewer()
editor = create_editor()
publisher = create_publisher()
summarizer = create_summarizer()

builder = WorkflowBuilder(
    name="Content Review Workflow",
    description="Multi-agent content creation with quality-based routing (Writer→Reviewer→Editor/Publisher)",
    start_executor=writer,
)
builder = builder.add_edge(writer, reviewer)
# High-quality branch: reviewer approval sends content straight to the publisher.
builder = builder.add_edge(reviewer, publisher, condition=is_approved)
# Low-quality branch: a detour through the editor before publishing.
builder = builder.add_edge(reviewer, editor, condition=needs_editing)
builder = builder.add_edge(editor, publisher)
# Convergence point: every path ends at the summarizer.
builder = builder.add_edge(publisher, summarizer)
workflow = builder.build()
def main():
    """Serve the workflow in the Agent Framework DevUI on port 8093."""
    # Imported lazily so the module can be imported without the devui extra.
    from agent_framework.devui import serve

    serve(entities=[workflow], port=8093, auto_open=True)


if __name__ == "__main__":
    main()