ModelInput

class pytext.models.query_document_pairwise_ranking_model.ModelInput

Bases: ModelInput

All Attributes (including base classes)
- pos_response: TokenTensorizer.Config = TokenTensorizer.Config(column='pos_response')
- neg_response: TokenTensorizer.Config = TokenTensorizer.Config(column='neg_response')
- query: TokenTensorizer.Config = TokenTensorizer.Config(column='query')
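The three tensorizers map one-to-one onto columns of a pairwise-ranking dataset: a query plus a positive and a negative candidate response. As a minimal sketch (assuming a standard PyText install where TokenTensorizer is importable from pytext.data.tensorizers; the max_seq_len override on the query is illustrative, not a default), the same configuration built programmatically looks like:

# Minimal sketch, assuming TokenTensorizer is importable from
# pytext.data.tensorizers as in standard PyText. Each Config names the
# dataset column its tensorizer reads from.
from pytext.data.tensorizers import TokenTensorizer

model_input = {
    "pos_response": TokenTensorizer.Config(column="pos_response"),
    "neg_response": TokenTensorizer.Config(column="neg_response"),
    # max_seq_len=64 is an illustrative override, not the default (null).
    "query": TokenTensorizer.Config(column="query", max_seq_len=64),
}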
Default JSON
{
"pos_response": {
"is_input": true,
"column": "pos_response",
"tokenizer": {
"Tokenizer": {
"split_regex": "\\s+",
"lowercase": true,
"use_byte_offsets": false
}
},
"add_bos_token": false,
"add_eos_token": false,
"use_eos_token_for_bos": false,
"max_seq_len": null,
"vocab": {
"build_from_data": true,
"size_from_data": 0,
"min_counts": 0,
"vocab_files": []
},
"vocab_file_delimiter": " "
},
"neg_response": {
"is_input": true,
"column": "neg_response",
"tokenizer": {
"Tokenizer": {
"split_regex": "\\s+",
"lowercase": true,
"use_byte_offsets": false
}
},
"add_bos_token": false,
"add_eos_token": false,
"use_eos_token_for_bos": false,
"max_seq_len": null,
"vocab": {
"build_from_data": true,
"size_from_data": 0,
"min_counts": 0,
"vocab_files": []
},
"vocab_file_delimiter": " "
},
"query": {
"is_input": true,
"column": "query",
"tokenizer": {
"Tokenizer": {
"split_regex": "\\s+",
"lowercase": true,
"use_byte_offsets": false
}
},
"add_bos_token": false,
"add_eos_token": false,
"use_eos_token_for_bos": false,
"max_seq_len": null,
"vocab": {
"build_from_data": true,
"size_from_data": 0,
"min_counts": 0,
"vocab_files": []
},
"vocab_file_delimiter": " "
}
}
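In practice a task config usually specifies only the fields that differ from the defaults above; PyText typically fills the rest in from these defaults (an assumption here, hedged accordingly). A hypothetical partial override, expressed as the Python dict that would sit under this ModelInput section of a larger task config, with field names taken verbatim from the Default JSON:

# Hedged sketch of a partial override; unlisted fields are assumed to be
# filled from the defaults shown above. Field names come from the
# Default JSON on this page.
model_input_override = {
    "query": {"max_seq_len": 64},
    "pos_response": {"tokenizer": {"Tokenizer": {"lowercase": False}}},
    "neg_response": {"tokenizer": {"Tokenizer": {"lowercase": False}}},
}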