from openai import OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
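# Read the GravixLayer API key loaded from the local .env file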
api_key = os.environ.get("GRAVIXLAYER_API_KEY")
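# Create an OpenAI-compatible client pointed at the GravixLayer inference endpoint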
llm = OpenAI(
    api_key=api_key,
    base_url="https://api.gravixlayer.com/v1/inference"
)
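# Each "agent" is simply a role name plus the task it should handle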
agents = [
    {"role": "Summarizer", "task": "Summarize this: AI is transforming industries..."},
    {"role": "Industry Analyst", "task": "List three industries most impacted by AI."},
    {"role": "Healthcare Researcher", "task": "Suggest a research question about AI in healthcare."}
]
responses = []
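# Run each agent's task as its own chat completion; the role is injected through the system prompt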
for agent in agents:
    response = llm.chat.completions.create(
        model="meta-llama/llama-3.1-8b-instruct",
        messages=[
            {"role": "system", "content": f"You are a helpful assistant with the role: {agent['role']}"},
            {"role": "user", "content": agent["task"]}
        ]
    )
    responses.append((agent["role"], response.choices[0].message.content))
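# Print each agent's labeled response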
for i, (role, r) in enumerate(responses):
print(f"Agent {i+1} ({role}) response: {r}\n")