disable unused code

- app.py (+1 -1)
- main_backend.py (+56 -55)
app.py CHANGED

@@ -34,7 +34,7 @@ from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
 
 
-subprocess.run(["python", "scripts/fix_harness_import.py"])
+# subprocess.run(["python", "scripts/fix_harness_import.py"])
 
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
    	
main_backend.py CHANGED

@@ -5,9 +5,9 @@ from huggingface_hub import snapshot_download
 
 logging.getLogger("openai").setLevel(logging.WARNING)
 
-from src.backend.run_eval_suite import run_evaluation
-from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
-from src.backend.sort_queue import sort_models_by_priority
+# from src.backend.run_eval_suite import run_evaluation
+# from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
+# from src.backend.sort_queue import sort_models_by_priority
 
 from src.envs import QUEUE_REPO, EVAL_REQUESTS_PATH_BACKEND, RESULTS_REPO, EVAL_RESULTS_PATH_BACKEND, DEVICE, API, LIMIT, TOKEN
 from src.about import Tasks, NUM_FEWSHOT
@@ -21,58 +21,59 @@ RUNNING_STATUS = "RUNNING"
 FINISHED_STATUS = "FINISHED"
 FAILED_STATUS = "FAILED"
 
+print('Downloading results and requests.')
 snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
 snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
 
-def run_auto_eval():
-    current_pending_status = [PENDING_STATUS]
-
-    # pull the eval dataset from the hub and parse any eval requests
-    # check completed evals and set them to finished
-    check_completed_evals(
-        api=API,
-        checked_status=RUNNING_STATUS,
-        completed_status=FINISHED_STATUS,
-        failed_status=FAILED_STATUS,
-        hf_repo=QUEUE_REPO,
-        local_dir=EVAL_REQUESTS_PATH_BACKEND,
-        hf_repo_results=RESULTS_REPO,
-        local_dir_results=EVAL_RESULTS_PATH_BACKEND
-    )
-
-    # Get all eval request that are PENDING, if you want to run other evals, change this parameter
-    eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
-    # Sort the evals by priority (first submitted first run)
-    eval_requests = sort_models_by_priority(api=API, models=eval_requests)
-
-    print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
-
-    if len(eval_requests) == 0:
-        return
-
-    eval_request = eval_requests[0]
-    pp.pprint(eval_request)
-
-    set_eval_request(
-        api=API,
-        eval_request=eval_request,
-        set_to_status=RUNNING_STATUS,
-        hf_repo=QUEUE_REPO,
-        local_dir=EVAL_REQUESTS_PATH_BACKEND,
-    )
-
-    run_evaluation(
-        eval_request=eval_request,
-        task_names=TASKS_HARNESS,
-        num_fewshot=NUM_FEWSHOT,
-        local_dir=EVAL_RESULTS_PATH_BACKEND,
-        results_repo=RESULTS_REPO,
-        batch_size=1,
-        device=DEVICE,
-        no_cache=True,
-        limit=LIMIT
-        )
-
-
-if __name__ == "__main__":
-    run_auto_eval()
+# def run_auto_eval():
+#     current_pending_status = [PENDING_STATUS]
+#
+#     # pull the eval dataset from the hub and parse any eval requests
+#     # check completed evals and set them to finished
+#     check_completed_evals(
+#         api=API,
+#         checked_status=RUNNING_STATUS,
+#         completed_status=FINISHED_STATUS,
+#         failed_status=FAILED_STATUS,
+#         hf_repo=QUEUE_REPO,
+#         local_dir=EVAL_REQUESTS_PATH_BACKEND,
+#         hf_repo_results=RESULTS_REPO,
+#         local_dir_results=EVAL_RESULTS_PATH_BACKEND
+#     )
+#
+#     # Get all eval request that are PENDING, if you want to run other evals, change this parameter
+#     eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
+#     # Sort the evals by priority (first submitted first run)
+#     eval_requests = sort_models_by_priority(api=API, models=eval_requests)
+#
+#     print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
+#
+#     if len(eval_requests) == 0:
+#         return
+#
+#     eval_request = eval_requests[0]
+#     pp.pprint(eval_request)
+#
+#     set_eval_request(
+#         api=API,
+#         eval_request=eval_request,
+#         set_to_status=RUNNING_STATUS,
+#         hf_repo=QUEUE_REPO,
+#         local_dir=EVAL_REQUESTS_PATH_BACKEND,
+#     )
+#
+#     run_evaluation(
+#         eval_request=eval_request,
+#         task_names=TASKS_HARNESS,
+#         num_fewshot=NUM_FEWSHOT,
+#         local_dir=EVAL_RESULTS_PATH_BACKEND,
+#         results_repo=RESULTS_REPO,
+#         batch_size=1,
+#         device=DEVICE,
+#         no_cache=True,
+#         limit=LIMIT
+#         )
+#
+#
+# if __name__ == "__main__":
+#     run_auto_eval()