duzx16 committed on
Commit ·
b259b27
1
Parent(s): a1170c5
Change default max length
Browse files — modeling_chatglm.py (+2, −2)
modeling_chatglm.py
CHANGED
|
@@ -935,7 +935,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
|
|
| 935 |
|
| 936 |
|
| 937 |
@torch.no_grad()
|
| 938 |
-
def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
|
| 939 |
do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs):
|
| 940 |
if history is None:
|
| 941 |
history = []
|
|
@@ -954,7 +954,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
|
|
| 954 |
|
| 955 |
@torch.no_grad()
|
| 956 |
def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,
|
| 957 |
-
max_length: int = 2048, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
|
| 958 |
return_past_key_values=False, **kwargs):
|
| 959 |
if history is None:
|
| 960 |
history = []
|
|
|
|
| 935 |
|
| 936 |
|
| 937 |
@torch.no_grad()
|
| 938 |
+
def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 8192, num_beams=1,
|
| 939 |
do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs):
|
| 940 |
if history is None:
|
| 941 |
history = []
|
|
|
|
| 954 |
|
| 955 |
@torch.no_grad()
|
| 956 |
def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,
|
| 957 |
+
max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
|
| 958 |
return_past_key_values=False, **kwargs):
|
| 959 |
if history is None:
|
| 960 |
history = []
|