浏览代码

fix: Resolve log_mode issue in aios_starter() function (#274)

* Fix issue: aios_starter() had a log_mode problem

* Update

* Update
xiang 1 天之前
父节点
当前提交
dbf241d403
共有 1 个文件被更改,包括 5 次插入和 4 次删除
  1. 5 4
      aios/utils/utils.py

+ 5 - 4
aios/utils/utils.py

@@ -23,11 +23,12 @@ def parse_global_args():
     parser.add_argument('--max_gpu_memory', type=json.loads, help="Max gpu memory allocated for the LLM")
     parser.add_argument('--eval_device', type=str, help="Evaluation device")
     parser.add_argument('--max_new_tokens', type=int, default=256, help="The maximum number of new tokens for generation")
-    parser.add_argument("--log_mode", type=str, default="console", choices=["console", "file"])
-    parser.add_argument("--use_backend", type=str, choices=["ollama", "vllm"])
-
+    # parser.add_argument("--log_mode", type=str, default="console", choices=["console", "file"])
+    parser.add_argument("--scheduler_log_mode", type=str, default="console", choices=["console", "file"], help="Log mode for the scheduler")
+    parser.add_argument("--agent_log_mode", type=str, default="console", choices=["console", "file"], help="Log mode for the agents")
+    parser.add_argument("--llm_kernel_log_mode", type=str, default="console", choices=["console", "file"], help="Log mode for the LLM kernel")
+    parser.add_argument("--use_backend", type=str, choices=["ollama", "vllm"], help="Backend to use for running the LLM kernel")
     return parser
-
 def extract_before_parenthesis(s: str) -> str:
     match = re.search(r'^(.*?)\([^)]*\)', s)
     return match.group(1) if match else s