@@ -66,21 +66,29 @@ def parse_chat(lines: list[str]) -> tuple[str, list[dict]]:
 
     return your_name, messages
 
-def build_payload(model: str, your_name: str, chat_messages: list[dict], temperature: float | None) -> dict:
+def build_payload(model: str, your_name: str, chat_messages: list[dict], temperature: float | None, prompt: str | None, max_tokens: int | None) -> dict:
+    prompt = prompt if prompt else "Suggest {your_name}'s next reply."
+    prompt = prompt.replace("{your_name}", your_name)
+
     system_msg = {
         "role": "system",
-        "content": f"You are {your_name} in a chat. Suggest {your_name}'s next reply."
+        "content": f"You are {your_name} in a chat. " + prompt
     }
     final_instruction = {
         "role": "user",
-        "content": f"Suggest {your_name}'s next reply."
+        "content": prompt
     }
     payload = {
         "model": model,
         "messages": [system_msg] + chat_messages + [final_instruction]
     }
+
     if temperature is not None:
         payload["temperature"] = temperature
+
+    if max_tokens is not None:
+        payload["max_tokens"] = max_tokens
+
     return payload
 
 def send_request(payload: dict, api_url: str, api_key: str, verbose: bool, timeout: int) -> str:
@@ -136,6 +144,10 @@ def main():
                         help="Service: openai (default), gemini, or host[:port]/URL for OpenAI-compatible server.")
     parser.add_argument("-m", "--model", default=None,
                         help="Model name. Defaults depend on --service (openai: gpt-4o-mini, gemini: gemini-2.0-flash).")
+    parser.add_argument("-M", "--max-tokens", type=int,
+                        help="Maximum number of output tokens to generate.")
+    parser.add_argument("-p", "--prompt", default=None,
+                        help="Instruction prompt (default: \"Suggest {your_name}'s next reply.\")")
     parser.add_argument("-t", "--temperature", type=float,
                         help="Sampling temperature (e.g., 0.2).")
     parser.add_argument("-v", "--verbose", action="store_true",
@@ -169,7 +181,7 @@ def main():
 
     lines = read_chat_file(args.chat_completion)
     your_name, chat_messages = parse_chat(lines)
-    payload = build_payload(model, your_name, chat_messages, args.temperature)
+    payload = build_payload(model, your_name, chat_messages, args.temperature, args.prompt, args.max_tokens)
     reply = send_request(payload, api_url, api_key, verbose=args.verbose, timeout=args.timeout)
     reply_without_name = reply.split(":", 1)[1].strip() if ":" in reply else reply
     print(reply_without_name)
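For illustration, a minimal sketch of how the updated build_payload resolves the new prompt and max_tokens parameters. The speaker names, message text, and parameter values below are made up, not taken from the commit.

# Illustrative call into build_payload as changed above; "Bob", the sample
# message, and the numeric values are hypothetical.
chat_messages = [
    {"role": "user", "content": "Alice: are we still on for tonight?"},
]

payload = build_payload(
    "gpt-4o-mini",   # model
    "Bob",           # your_name
    chat_messages,
    0.2,             # temperature
    None,            # prompt -> falls back to "Suggest {your_name}'s next reply."
    150,             # max_tokens
)

# Resulting payload (abridged):
#   messages[0]  -> system: "You are Bob in a chat. Suggest Bob's next reply."
#   messages[-1] -> user:   "Suggest Bob's next reply."
#   temperature  -> 0.2
#   max_tokens   -> 150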