@@ -777,21 +777,73 @@ def get_async_explainer():
777777 div_id = request .vars .div_id
778778
779779 messages = db (
780- (db .useinfo .event == "sendmessage" )
780+ (db .useinfo .event . belongs ([ "sendmessage" , "reflection" ]) )
781781 & (db .useinfo .div_id == div_id )
782782 & (db .useinfo .course_id == course_name )
783783 ).select (orderby = db .useinfo .id )
784784
785- if len (messages ) == 0 :
786- mess = "Sorry there are no explanations yet."
787- else :
788- parts = []
789- for row in messages :
785+ all_msgs = [] #list of (sid, msg) in insertion order
786+ last_per_sid = {}
787+ for row in messages :
788+ if row .event == "reflection" :
789+ msg = row .act
790+ else :
790791 try :
791792 msg = row .act .split (":" , 2 )[2 ]
792793 except Exception :
793794 msg = row .act
794- parts .append (f"<li><strong>{ row .sid } </strong> said: { msg } </li>" )
795+ if last_per_sid .get (row .sid ) != msg : #skip exact consecutive duplicates only
796+ all_msgs .append ((row .sid , msg ))
797+ last_per_sid [row .sid ] = msg
798+
799+ llm_turns = db (
800+ (db .useinfo .event == "pi_llm_turn" )
801+ & (db .useinfo .div_id == div_id )
802+ & (db .useinfo .course_id == course_name )
803+ ).select (orderby = db .useinfo .id )
804+
805+ llm_by_sid = {}
806+ for row in llm_turns :
807+ try :
808+ turn = json .loads (row .act )
809+ attempt_id = turn .get ("pi_attempt_id" , "" )
810+ turn_index = turn .get ("turn_index" , 0 )
811+ role = turn .get ("role" , "" )
812+ content = turn .get ("content" , "" )
813+ if row .sid not in llm_by_sid :
814+ llm_by_sid [row .sid ] = {}
815+ if attempt_id not in llm_by_sid [row .sid ]:
816+ llm_by_sid [row .sid ][attempt_id ] = []
817+ llm_by_sid [row .sid ][attempt_id ].append ((turn_index , role , content ))
818+ except Exception :
819+ pass
820+
821+ parts = []
822+ sids_with_llm_shown = set ()
823+ for sid , msg in all_msgs :
824+ parts .append (f"<li><strong>{ sid } </strong> said: { msg } </li>" )
825+ if sid in llm_by_sid and sid not in sids_with_llm_shown :
826+ sids_with_llm_shown .add (sid )
827+ latest_attempt = max (
828+ llm_by_sid [sid ].keys (),
829+ key = lambda a : max (t [0 ] for t in llm_by_sid [sid ][a ])
830+ )
831+ turns = sorted (llm_by_sid [sid ][latest_attempt ], key = lambda t : t [0 ])
832+ for _ , role , content in turns :
833+ if role == "assistant" :
834+ parts .append (f"<li><strong>LLM Peer</strong> said: { content } </li>" )
835+
836+ for sid , attempts in llm_by_sid .items ():
837+ if sid not in sids_with_llm_shown and not any (s == sid for s , _ in all_msgs ):
838+ latest_attempt = max (attempts .keys (), key = lambda a : max (t [0 ] for t in attempts [a ]))
839+ turns = sorted (attempts [latest_attempt ], key = lambda t : t [0 ])
840+ for _ , role , content in turns :
841+ if role == "assistant" :
842+ parts .append (f"<li><strong>LLM Peer</strong> said: { content } </li>" )
843+
844+ if not parts :
845+ mess = "Sorry there are no explanations yet."
846+ else :
795847 mess = "<ul>" + "" .join (parts ) + "</ul>"
796848
797849 logger .debug (f"Get message for { div_id } " )
@@ -885,20 +937,23 @@ def get_async_llm_reflection():
885937 "do not use new lines.\n "
886938 "do not sound like a teacher.\n "
887939 "do not explain step by step.\n "
888- "do not mention libraries formally.\n "
889940 "never say something is right or wrong.\n "
941+ "your answer can shift throughout the conversation\n "
890942 "never mention a choice letter as the correct answer.\n "
891943 "never clearly describe the final result of the code.\n "
892944 "never fully state what the program prints.\n "
893- "keep reasoning partial or uncertain.\n "
894945 "use common misconceptions relating to the specific problem.\n "
895946 "refer to code loosely like 'that line' or 'the loop' or 'the head' or 'the print'.\n "
896947 "often hedge with uncertainty.\n "
897- "never agree with the other student's interpretation even if it sounds correct.\n "
948+ # "never agree with the other student's interpretation even if it sounds correct.\n" #porter found when PI is adversarial students disengage
898949 "use content from the other multiple choice options in your reponses when needed\n "
899- "maintain a mistaken or incomplete mental model throughout the conversation. \n "
950+ "let your mental model shift slightly under pressure but keep reasoning partial and never fully resolve \n "
900951 "if the other student clearly sounds confident or repeats the same answer twice stop debating and tell them to vote again or submit it.\n "
901952 "do not continue reasoning after telling them to vote again.\n "
953+ "sometimes question whether you even read the code correctly before forming an opinion.\n "
954+ "occasionally bring up a wrong answer option as if it might be right without committing to it.\n "
955+ "pick an answer choice different than the one the student selected and ask the student to explain why it cannot be correct.\n "
956+ "show reasoning process not conclusions, think out loud rather than arriving anywhere.\n "
902957 "focus on reasoning not teaching.\n \n "
903958 )
904959
@@ -1048,35 +1103,39 @@ def send_lti_scores():
def _llm_enabled():
    """Return True when a course-wide OpenAI key is configured.

    Used as the gate for the LLM-based async peer-discussion features.
    """
    key = _get_course_openai_key()
    return bool(key)
10501105
1051-
1052- # fetch the course-wide openai API key used to enable LLM-based async peer discussion (only works for openai currently)
1053- # def _get_course_openai_key():
1054- # try:
1055- # token_record = asyncio.get_event_loop().run_until_complete(
1056- # fetch_api_token(course_id=auth.user.course_id, provider="openai")
1057- # )
1058- # if token_record and token_record.token:
1059- # return token_record.token.strip()
1060- # except Exception:
1061- # logger.exception("Failed to fetch course-wide OpenAI token for peer LLM")
1062- # return ""
# Fetch the course-wide OpenAI API key used to enable LLM-based async peer
# discussion (only works for OpenAI currently).
def _get_course_openai_key():
    """Look up and decrypt the course-wide OpenAI API token.

    Resolves the current user's course row, queries ``api_tokens`` for the
    least-recently-used OpenAI token for that course, and decrypts it with
    the Fernet key from the ``FERNET_SECRET`` environment variable.

    Returns:
        str: the decrypted API key, or ``""`` when no course row exists,
        no token is stored, or any step fails (the failure is logged).
    """
    try:
        course = db(db.courses.course_name == auth.user.course_name).select().first()
        if not course:
            logger.warning(
                "PEER LLM: no course row found for %s", auth.user.course_name
            )
            return ""

        logger.debug(
            "PEER LLM: looking up token for course_id=%s (%s)",
            course.id,
            auth.user.course_name,
        )

        # Pick the least-recently-used token so multiple keys rotate.
        # NOTE(review): "NULLS FIRST" is PostgreSQL-specific syntax and will
        # fail on SQLite/MySQL backends — confirm the deployment database.
        rows = db.executesql(
            "SELECT token FROM api_tokens "
            "WHERE course_id = %s AND provider = %s "
            "ORDER BY last_used ASC NULLS FIRST LIMIT 1",
            placeholders=[course.id, "openai"],
        )
        logger.debug(
            "PEER LLM: executesql returned %d rows", len(rows) if rows else 0
        )

        if rows and rows[0][0]:
            # Imported locally: only needed on this path and keeps the
            # third-party dependency out of module import time.
            from cryptography.fernet import Fernet

            secret = os.environ.get("FERNET_SECRET", "").strip()
            if not secret:
                raise RuntimeError("FERNET_SECRET environment variable is not set")
            fernet = Fernet(secret.encode() if isinstance(secret, str) else secret)
            decrypted = fernet.decrypt(rows[0][0].encode()).decode().strip()
            # Security: never write any portion of a decrypted secret to the
            # logs — record only that decryption succeeded for this course.
            logger.debug("PEER LLM: decrypted key for course %s", course.id)
            return decrypted

        logger.warning("PEER LLM: no openai token found for course_id=%s", course.id)
    except Exception:
        logger.exception("Failed to fetch course-wide OpenAI token for peer LLM")
    # Fall-through (no token / error): return the documented empty-string
    # sentinel so callers such as _llm_enabled() always receive a str.
    return ""
10821141
0 commit comments