Skip to content

Navigation Menu

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 0c77121

Browse files
Introduced ReAct score and some assumptions.
1 parent 92e91f9 commit 0c77121
Copy full SHA for 0c77121

File tree

1 file changed

+20
-8
lines changed
Filter options

1 file changed

+20
-8
lines changed

‎aimon/extensions/react.py

Copy file name to clipboard. Expand all lines: aimon/extensions/react.py
+20-8
Original file line numberDiff line numberDiff line change
@@ -61,13 +61,21 @@ def detect_aimon_response(self,aimon_payload):
6161

6262
## ReAct -> Reason and Act
6363
def react(self, user_query, user_instructions,):
64-
64+
"""
65+
AIMon-ReAct -> Reason and Act with AIMon
66+
67+
where ReAct score:
68+
1 if after n attempts of ReAct, the LLM follows instructions and generates a non-hallucinated response.
69+
0.5 if after n attempts of ReAct, either the LLM response follows user_instructions or is under the hallucination threshold [BUT NOT BOTH].
70+
0 otherwise.
71+
"""
6572
result = {
66-
'response': [],
67-
'hscore':[],
68-
'adherence':True ## True by default
73+
'responses': [],
74+
'hallucination_scores': [],
75+
'adherence': False, ## False by assumption
76+
'react_score': 0 ## 0 by assumption
6977
}
70-
78+
7179
llm_response = self.llm_app(user_query, user_instructions, reprompted_flag=False)
7280

7381
context = self.context_extractor(user_query, user_instructions, llm_response)
@@ -127,19 +135,23 @@ def react(self, user_query, user_instructions,):
127135

128136
else:
129137
break
130-
131-
138+
139+
132140
if hallucination_score > self.react_configuration.hallucination_threshold and result['adherence']==False:
133141
result['response'].append(f"Even after {self.react_configuration.max_attempts} attempts of AIMon react, the LLM neither adheres to the user instructions, nor generates a response that is not hallucinated. Final LLM response: {generated_text}")
134142
result['hscore'].append(hallucination_score)
143+
result['react_score']=0
135144
elif hallucination_score > self.react_configuration.hallucination_threshold and result['adherence']==True:
136145
result['response'].append(f"Although the LLM adheres to the user instructions, the generated response, even after {self.react_configuration.max_attempts} attempts of AIMon ReAct is still hallucinated. Final LLM response: {generated_text}")
137146
result['hscore'].append(hallucination_score)
147+
result['react_score']=0.5
138148
elif hallucination_score <= self.react_configuration.hallucination_threshold and result['adherence']==False:
139149
result['response'].append(f"Although the LLM generates a non-hallucinated response, it fails to adhere to the user instructions, even after {self.react_configuration.max_attempts} attempts of AIMon ReAct. Final LLM response: {generated_text}")
140150
result['hscore'].append(hallucination_score)
151+
result['react_score']=0.5
141152
else:
142-
result['response'].append(generated_text)
153+
result['response'].append(f"This response is below the hallucination threshold and adheres to the user instructions. Response {generated_text}")
143154
result['hscore'].append(hallucination_score)
155+
result['react_score']=0
144156

145157
return result

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.