@@ -37,7 +37,7 @@
 from __future__ import unicode_literals
 from future.builtins.disabled import *  # noqa
 from future.builtins import *  # noqa
-from six import itervalues, iteritems
+from six import iterkeys, itervalues, iteritems
 
 import logging
 
@@ -46,14 +46,14 @@
 from functools import wraps
 
 import gevent.lock
-
+from sqlalchemy import func
 from sqlalchemy.exc import IntegrityError
-from sqlalchemy.orm import joinedload
 
 from cms import ServiceCoord, get_service_shards
 from cms.io import Executor, TriggeredService, rpc_method
-from cms.db import SessionGen, Digest, Dataset, Submission, UserTest, \
-    get_submissions, get_submission_results, get_datasets_to_judge
+from cms.db import SessionGen, Digest, Dataset, Evaluation, Submission, \
+    SubmissionResult, Testcase, UserTest, UserTestResult, get_submissions, \
+    get_submission_results, get_datasets_to_judge
 from cms.grading.Job import JobGroup
 
 from .esoperations import ESOperation, get_relevant_operations, \
@@ -477,47 +477,29 @@ def write_results(self, items):
             by_object_and_type[t].append((operation, result))
 
         with SessionGen() as session:
-            # Dictionary holding the objects we use repeatedly,
-            # indexed by id, to avoid querying them multiple times.
-            # TODO: this pattern is used in WorkerPool and should be
-            # abstracted away.
-            datasets = dict()
-            subs = dict()
-            srs = dict()
-
             for key, operation_results in iteritems(by_object_and_type):
                 type_, object_id, dataset_id = key
 
-                # Get dataset.
-                if dataset_id not in datasets:
-                    datasets[dataset_id] = session.query(Dataset)\
-                        .filter(Dataset.id == dataset_id)\
-                        .options(joinedload(Dataset.testcases))\
-                        .first()
-                dataset = datasets[dataset_id]
+                dataset = Dataset.get_from_id(dataset_id, session)
                 if dataset is None:
                     logger.error("Could not find dataset %d in the database.",
                                  dataset_id)
                     continue
 
-                # Get submission or user test, and their results.
+                # Get submission or user test results.
                 if type_ in [ESOperation.COMPILATION, ESOperation.EVALUATION]:
-                    if object_id not in subs:
-                        subs[object_id] = \
-                            Submission.get_from_id(object_id, session)
-                    object_ = subs[object_id]
+                    object_ = Submission.get_from_id(object_id, session)
                     if object_ is None:
                         logger.error("Could not find submission %d "
                                      "in the database.", object_id)
                         continue
-                    result_id = (object_id, dataset_id)
-                    if result_id not in srs:
-                        srs[result_id] = object_.get_result_or_create(dataset)
-                    object_result = srs[result_id]
+                    object_result = object_.get_result_or_create(dataset)
                 else:
-                    # We do not cache user tests as they can come up
-                    # only once.
                     object_ = UserTest.get_from_id(object_id, session)
+                    if object_ is None:
+                        logger.error("Could not find user test %d "
+                                     "in the database.", object_id)
+                        continue
                     object_result = object_.get_result_or_create(dataset)
 
                 self.write_results_one_object_and_type(
@@ -526,36 +508,44 @@ def write_results(self, items):
             logger.info("Committing evaluations...")
             session.commit()
 
-            for type_, object_id, dataset_id in by_object_and_type:
+            num_testcases_per_dataset = dict()
+            for type_, object_id, dataset_id in iterkeys(by_object_and_type):
                 if type_ == ESOperation.EVALUATION:
-                    submission_result = srs[(object_id, dataset_id)]
-                    dataset = datasets[dataset_id]
-                    if len(submission_result.evaluations) == \
-                            len(dataset.testcases):
+                    if dataset_id not in num_testcases_per_dataset:
+                        num_testcases_per_dataset[dataset_id] = session\
+                            .query(func.count(Testcase.id))\
+                            .filter(Testcase.dataset_id == dataset_id).scalar()
+                    num_evaluations = session\
+                        .query(func.count(Evaluation.id)) \
+                        .filter(Evaluation.dataset_id == dataset_id) \
+                        .filter(Evaluation.submission_id == object_id).scalar()
+                    if num_evaluations == num_testcases_per_dataset[dataset_id]:
+                        submission_result = SubmissionResult.get_from_id(
+                            (object_id, dataset_id), session)
                         submission_result.set_evaluation_outcome()
 
             logger.info("Committing evaluation outcomes...")
             session.commit()
 
             logger.info("Ending operations for %s objects...",
                         len(by_object_and_type))
-            for type_, object_id, dataset_id in by_object_and_type:
+            for type_, object_id, dataset_id in iterkeys(by_object_and_type):
                 if type_ == ESOperation.COMPILATION:
-                    submission_result = srs[(object_id, dataset_id)]
+                    submission_result = SubmissionResult.get_from_id(
+                        (object_id, dataset_id), session)
                     self.compilation_ended(submission_result)
                 elif type_ == ESOperation.EVALUATION:
-                    submission_result = srs[(object_id, dataset_id)]
+                    submission_result = SubmissionResult.get_from_id(
+                        (object_id, dataset_id), session)
                     if submission_result.evaluated():
                         self.evaluation_ended(submission_result)
                 elif type_ == ESOperation.USER_TEST_COMPILATION:
-                    user_test_result = UserTest\
-                        .get_from_id(object_id, session)\
-                        .get_result(datasets[dataset_id])
+                    user_test_result = UserTestResult.get_from_id(
+                        (object_id, dataset_id), session)
                    self.user_test_compilation_ended(user_test_result)
                 elif type_ == ESOperation.USER_TEST_EVALUATION:
-                    user_test_result = UserTest\
-                        .get_from_id(object_id, session)\
-                        .get_result(datasets[dataset_id])
+                    user_test_result = UserTestResult.get_from_id(
+                        (object_id, dataset_id), session)
                     self.user_test_evaluation_ended(user_test_result)
 
             logger.info("Done")
0 commit comments