You can use the `with` statement to use a dedicated LLM. The `DedicatedLLM` class automatically shuts down and deletes your instance as soon as the `with` block is left.
from cortecs_py import Cortecs
from cortecs_py.integrations import DedicatedLLM

# Client for the cortecs provisioning API (reads credentials from the environment).
cortecs = Cortecs()

# DedicatedLLM provisions a dedicated instance on entry and shuts it down
# and deletes it again when the `with` block is left.
with DedicatedLLM(cortecs, 'neuralmagic--Meta-Llama-3.1-8B-Instruct-FP8') as llm:
    essay = llm.invoke('Write an essay about dynamic provisioning')
    print(essay.content)
CrewAI
Install crewAI
pip install crewai crewai-tools
Using `DedicatedCrewBase` in combination with `DedicatedCrew` allows you to easily run dynamically provisioned crews. For more information, see the crew docs.
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from cortecs_py.integrations import DedicatedCrew, DedicatedCrewBase


@DedicatedCrewBase
class ExampleCrew():
    """Example crew whose LLM instance is dynamically provisioned.

    `@DedicatedCrewBase` supplies `self.instance_id` and `self.client`,
    which are handed to `DedicatedCrew` so the instance can be managed
    for the crew's lifetime.
    """

    @agent
    def researcher(self) -> Agent:
        # Agent configuration is loaded from the YAML-backed agents_config.
        return Agent(
            config=self.agents_config['researcher'],
            # tools=[MyCustomTool()],  # Example of custom tool, loaded in the beginning of file
            verbose=True
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(
            config=self.tasks_config['research_task'],
        )

    @task
    def reporting_task(self) -> Task:
        # Writes the final report to report.md in the working directory.
        return Task(
            config=self.tasks_config['reporting_task'],
            output_file='report.md'
        )

    @crew
    def crew(self) -> Crew:
        # DedicatedCrew ties the crew run to the provisioned instance so it
        # can be shut down when the run finishes.
        return DedicatedCrew(
            instance_id=self.instance_id,
            client=self.client,
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential
        )