Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 0318a5b

Browse files
committed
Minor fix
1 parent 0945718 commit 0318a5b
Copy full SHA for 0318a5b

15 files changed

+46 −382 — Lines changed: 46 additions & 382 deletions
Expand file tree / Collapse file tree
Open diff view settings
Collapse file

‎README.md‎

Copy file name to clipboard · Expand all lines: README.md
+5 −5 — Lines changed: 5 additions & 5 deletions
  • Display the source diff
  • Display the rich diff
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ from medea import medea, AgentLLM, LLMConfig
140140
from medea import ResearchPlanning, Analysis, LiteratureReasoning
141141
from medea import (
142142
ResearchPlanDraft, ContextVerification, IntegrityVerification,
143-
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQulityChecker,
143+
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQualityChecker,
144144
LiteratureSearch, PaperJudge, OpenScholarReasoning
145145
)
146146

@@ -162,7 +162,7 @@ analysis_actions = [
162162
CodeGenerator(tmp=0.4, llm_provider=backbone_llm),
163163
AnalysisExecution(),
164164
CodeDebug(tmp=0.4, llm_provider=backbone_llm),
165-
AnalysisQulityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
165+
AnalysisQualityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
166166
]
167167

168168
literature_actions = [
@@ -203,7 +203,7 @@ from medea import experiment_analysis, AgentLLM, LLMConfig
203203
from medea import ResearchPlanning, Analysis
204204
from medea import (
205205
ResearchPlanDraft, ContextVerification, IntegrityVerification,
206-
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQulityChecker
206+
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQualityChecker
207207
)
208208

209209
# Step 1: Initialize LLMs
@@ -223,7 +223,7 @@ analysis_actions = [
223223
CodeGenerator(tmp=0.4, llm_provider=backbone_llm),
224224
AnalysisExecution(),
225225
CodeDebug(tmp=0.4, llm_provider=backbone_llm),
226-
AnalysisQulityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
226+
AnalysisQualityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
227227
]
228228

229229
# Step 3: Create modules
@@ -349,7 +349,7 @@ python main.py --task immune_response --patient-tpm-root /path/to/tpm/data
349349
# Custom temperature (LLM temperature for all modules)
350350
python main.py --temperature 0.7
351351

352-
# Custom quality iterations (3 max iteration for IntegrityVerification, 3 max iteration for AnalysisQulityChecker)
352+
# Custom quality iterations (3 max iteration for IntegrityVerification, 3 max iteration for AnalysisQualityChecker)
353353
python main.py --quality-max-iter 3 --code-quality-max-iter 3
354354

355355
# Custom debate rounds
Collapse file

‎examples/README.md‎

Copy file name to clipboard · Expand all lines: examples/README.md
+2 −2 — Lines changed: 2 additions & 2 deletions
  • Display the source diff
  • Display the rich diff
Original file line numberDiff line numberDiff line change
@@ -91,10 +91,10 @@ analysis_llm = AgentLLM(LLMConfig({"temperature": 0.5}), llm_name="claude")
9191

9292
**Max iterations**:
9393
```python
94-
from medea import IntegrityVerification, AnalysisQulityChecker
94+
from medea import IntegrityVerification, AnalysisQualityChecker
9595

9696
IntegrityVerification(max_iter=3) # Research plan quality
97-
AnalysisQulityChecker(max_iter=2) # Code quality
97+
AnalysisQualityChecker(max_iter=2) # Code quality
9898
```
9999

100100
## Troubleshooting
Collapse file

‎examples/custom_workflow.py‎

Copy file name to clipboard · Expand all lines: examples/custom_workflow.py
+3 −3 — Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
CodeGenerator,
2929
AnalysisExecution,
3030
CodeDebug,
31-
AnalysisQulityChecker,
31+
AnalysisQualityChecker,
3232
# Literature reasoning actions
3333
LiteratureSearch,
3434
PaperJudge,
@@ -100,7 +100,7 @@ def example_2_experiment_analysis_system():
100100
CodeGenerator(tmp=0.4, llm_provider="gpt-4o"),
101101
AnalysisExecution(),
102102
CodeDebug(tmp=0.4, llm_provider="gpt-4o"),
103-
AnalysisQulityChecker(tmp=0.4, llm_provider="gpt-4o", max_iter=2),
103+
AnalysisQualityChecker(tmp=0.4, llm_provider="gpt-4o", max_iter=2),
104104
]
105105

106106
# Initialize agents
@@ -182,7 +182,7 @@ def example_4_custom_temperature():
182182
CodeGenerator(tmp=0.5, llm_provider="gpt-4o"),
183183
AnalysisExecution(),
184184
CodeDebug(tmp=0.5, llm_provider="gpt-4o"),
185-
AnalysisQulityChecker(tmp=0.5, llm_provider="gpt-4o", max_iter=1),
185+
AnalysisQualityChecker(tmp=0.5, llm_provider="gpt-4o", max_iter=1),
186186
]
187187

188188
# Initialize agents
Collapse file

‎examples/quickstart.py‎

Copy file name to clipboard · Expand all lines: examples/quickstart.py
+2 −2 — Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
CodeGenerator,
2727
AnalysisExecution,
2828
CodeDebug,
29-
AnalysisQulityChecker,
29+
AnalysisQualityChecker,
3030
# Literature reasoning actions
3131
LiteratureSearch,
3232
PaperJudge,
@@ -85,7 +85,7 @@ def main():
8585
CodeGenerator(tmp=temperature, llm_provider=os.getenv("BACKBONE_LLM", "gpt-4o")),
8686
AnalysisExecution(),
8787
CodeDebug(tmp=temperature, llm_provider=os.getenv("BACKBONE_LLM", "gpt-4o")),
88-
AnalysisQulityChecker(tmp=temperature, llm_provider=os.getenv("BACKBONE_LLM", "gpt-4o"), max_iter=2),
88+
AnalysisQualityChecker(tmp=temperature, llm_provider=os.getenv("BACKBONE_LLM", "gpt-4o"), max_iter=2),
8989
]
9090

9191
# Literature reasoning actions
Collapse file

‎main.py‎

Copy file name to clipboard · Expand all lines: main.py
+3 −3 — Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@
4444
from medea.modules.agent_llms import LLMConfig, AgentLLM
4545

4646
# Agent implementations
47-
from medea.modules.experiment_analysis import Analysis, CodeDebug, AnalysisExecution, CodeGenerator, AnalysisQulityChecker
47+
from medea.modules.experiment_analysis import Analysis, CodeDebug, AnalysisExecution, CodeGenerator, AnalysisQualityChecker
4848
from medea.modules.discussion import multi_round_discussion
4949
from medea.modules.literature_reasoning import LiteratureSearch, OpenScholarReasoning, PaperJudge, LiteratureReasoning
5050
from medea.modules.research_planning import ContextVerification, ResearchPlanDraft, IntegrityVerification, ResearchPlanning
@@ -401,7 +401,7 @@ def parse_arguments():
401401

402402
# Iteration limits
403403
QUALITY_MAX_ITER = args.quality_max_iter
404-
CODE_QAULITY_MAX_ITER = args.code_quality_max_iter
404+
CODE_QUALITY_MAX_ITER = args.code_quality_max_iter
405405

406406
# Panel discussion settings
407407
DEBATE_ROUND = args.debate_rounds
@@ -472,7 +472,7 @@ def medea_unittest(df, user_template=None, agent_template=None):
472472
CodeGenerator(tmp=analysis_act_tmp, llm_provider=backbone_llm),
473473
AnalysisExecution(),
474474
CodeDebug(tmp=analysis_act_tmp, llm_provider=backbone_llm),
475-
AnalysisQulityChecker(tmp=analysis_act_tmp, llm_provider=backbone_llm, max_iter=CODE_QAULITY_MAX_ITER),
475+
AnalysisQualityChecker(tmp=analysis_act_tmp, llm_provider=backbone_llm, max_iter=CODE_QUALITY_MAX_ITER),
476476
]
477477

478478
print("=== Init Literature Reasoning Actions ===", flush=True)
Collapse file

‎medea.egg-info/PKG-INFO‎

Copy file name to clipboard · Expand all lines: medea.egg-info/PKG-INFO
+5 −5 — Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -254,7 +254,7 @@ from medea import medea, AgentLLM, LLMConfig
254254
from medea import ResearchPlanning, Analysis, LiteratureReasoning
255255
from medea import (
256256
ResearchPlanDraft, ContextVerification, IntegrityVerification,
257-
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQulityChecker,
257+
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQualityChecker,
258258
LiteratureSearch, PaperJudge, OpenScholarReasoning
259259
)
260260

@@ -276,7 +276,7 @@ analysis_actions = [
276276
CodeGenerator(tmp=0.4, llm_provider=backbone_llm),
277277
AnalysisExecution(),
278278
CodeDebug(tmp=0.4, llm_provider=backbone_llm),
279-
AnalysisQulityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
279+
AnalysisQualityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
280280
]
281281

282282
literature_actions = [
@@ -317,7 +317,7 @@ from medea import experiment_analysis, AgentLLM, LLMConfig
317317
from medea import ResearchPlanning, Analysis
318318
from medea import (
319319
ResearchPlanDraft, ContextVerification, IntegrityVerification,
320-
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQulityChecker
320+
CodeGenerator, AnalysisExecution, CodeDebug, AnalysisQualityChecker
321321
)
322322

323323
# Step 1: Initialize LLMs
@@ -337,7 +337,7 @@ analysis_actions = [
337337
CodeGenerator(tmp=0.4, llm_provider=backbone_llm),
338338
AnalysisExecution(),
339339
CodeDebug(tmp=0.4, llm_provider=backbone_llm),
340-
AnalysisQulityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
340+
AnalysisQualityChecker(tmp=0.4, llm_provider=backbone_llm, max_iter=2)
341341
]
342342

343343
# Step 3: Create modules
@@ -463,7 +463,7 @@ python main.py --task immune_response --patient-tpm-root /path/to/tpm/data
463463
# Custom temperature (LLM temperature for all modules)
464464
python main.py --temperature 0.7
465465

466-
# Custom quality iterations (3 max iteration for IntegrityVerification, 3 max iteration for AnalysisQulityChecker)
466+
# Custom quality iterations (3 max iteration for IntegrityVerification, 3 max iteration for AnalysisQualityChecker)
467467
python main.py --quality-max-iter 3 --code-quality-max-iter 3
468468

469469
# Custom debate rounds
Collapse file

‎medea/__init__.py‎

Copy file name to clipboard · Expand all lines: medea/__init__.py
+2 −2 — Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
CodeGenerator,
3232
AnalysisExecution,
3333
CodeDebug,
34-
AnalysisQulityChecker
34+
AnalysisQualityChecker
3535
)
3636

3737
from .modules.literature_reasoning import (
@@ -68,7 +68,7 @@
6868
'CodeGenerator',
6969
'AnalysisExecution',
7070
'CodeDebug',
71-
'AnalysisQulityChecker',
71+
'AnalysisQualityChecker',
7272

7373
# Literature reasoning actions
7474
'LiteratureSearch',
Collapse file

‎medea/modules/agent_llms.py‎

Copy file name to clipboard · Expand all lines: medea/modules/agent_llms.py
+1 −1 — Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ def run(
121121
temperature=self.temperature,
122122
model=self.model,
123123
mod='chat', # Already in message format
124-
attemps=max_attempts,
124+
attempts=max_attempts,
125125
use_openrouter=self.use_openrouter
126126
)
127127
return response
Collapse file

‎medea/modules/experiment_analysis.py‎

Copy file name to clipboard · Expand all lines: medea/modules/experiment_analysis.py
+3 −3 — Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -429,7 +429,7 @@ def __call__(self, code_snippet: CodeSnippet):
429429
return f"{code_snippet}: debugged, call AnalysisExecution next."
430430

431431

432-
class AnalysisQulityChecker(BaseAction):
432+
class AnalysisQualityChecker(BaseAction):
433433
"""Checks code quality and provides feedback for improvement."""
434434

435435
def __init__(self, llm_provider: str = None, tmp: float = 0.4, max_iter: int = 3) -> None:
@@ -554,7 +554,7 @@ def __init__(
554554
CodeGenerator(llm_provider=os.getenv("BACKBONE_LLM")),
555555
AnalysisExecution(),
556556
CodeDebug(llm_provider=os.getenv("BACKBONE_LLM")),
557-
AnalysisQulityChecker(llm_provider=os.getenv("BACKBONE_LLM")),
557+
AnalysisQualityChecker(llm_provider=os.getenv("BACKBONE_LLM")),
558558
]
559559

560560
name = "analysis_agent"
@@ -783,5 +783,5 @@ def forward(self, task: TaskPackage, agent_act: AgentAct) -> str:
783783
return observation
784784
if param_parse_flag:
785785
return WRONG_ACTION_PARAM
786-
return ACION_NOT_FOUND_MESS
786+
return ACTION_NOT_FOUND_MESS
787787

Collapse file

‎medea/modules/literature_reasoning.py‎

Copy file name to clipboard · Expand all lines: medea/modules/literature_reasoning.py
+2 −2 — Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from agentlite.agents import ABCAgent, BaseAgent
44
from agentlite.commons import AgentAct, TaskPackage
55
from agentlite.commons.AgentAct import ActObsChainType
6-
from agentlite.agents.agent_utils import act_match, ACION_NOT_FOUND_MESS
6+
from agentlite.agents.agent_utils import act_match, ACTION_NOT_FOUND_MESS
77
from typing import List, Dict, Any
88
import os
99
import torch
@@ -544,7 +544,7 @@ def forward(self, task: TaskPackage, agent_act: AgentAct):
544544
return observation
545545
if param_parse_flag:
546546
return WRONG_ACTION_PARAM
547-
return ACION_NOT_FOUND_MESS
547+
return ACTION_NOT_FOUND_MESS
548548

549549
def __add_inner_actions__(self):
550550
"""Add inner action types based on reasoning type."""

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.