Trained the model and modified the law_ai_qna.py file

parent 6ea46ecd
@@ -29,3 +29,22 @@ class QnADataset(Dataset):
return len(self.question_encoding)
dataset = QnADataset(question_encoding, answer_encoding)
training_args = TrainingArguments(
output_dir='Question n Answering', # output directory
num_train_epochs=1, # total # of training epochs
per_device_train_batch_size=100, # batch size per device during training
per_device_eval_batch_size=100, # batch size for evaluation
warmup_steps=500, # number of warmup steps for learning rate scheduler
weight_decay=0.01, # strength of weight decay
logging_dir='Question n Answering/logs', # directory for storing logs
logging_steps=10
)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=dataset # training dataset
)
trainer.train()
\ No newline at end of file
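
Not part of this commit: a minimal sketch of how the fine-tuned model could be persisted once trainer.train() finishes. The save_dir path is hypothetical, and the tokenizer object is assumed to exist elsewhere in law_ai_qna.py rather than taken from this diff.

save_dir = 'Question n Answering/final_model'  # hypothetical output path, not from the commit
trainer.save_model(save_dir)                   # writes the model weights and config to save_dir
# tokenizer.save_pretrained(save_dir)          # only if a tokenizer object is in scope in the script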