LMLSTM.Config

Component: LMLSTM

class LMLSTM.Config

Bases: BaseModel.Config

All Attributes (including base classes)

inputs: ModelInput = ModelInput()
embedding: WordEmbedding.Config = WordEmbedding.Config()
representation: Union[BiLSTM.Config, DeepCNNRepresentation.Config] = BiLSTM.Config(bidirectional=False)
decoder: Optional[MLPDecoder.Config] = MLPDecoder.Config()
output_layer: LMOutputLayer.Config = LMOutputLayer.Config()
tied_weights: bool = False
stateful: bool = False
caffe2_format: ExporterType = <ExporterType.PREDICTOR: 'predictor'>
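The attributes above map onto keyword arguments of the config class. The following is a minimal sketch of overriding two of the scalar flags programmatically; the import path and the keyword-override behaviour are assumptions based on how PyText config classes are usually constructed, and the field names come from the attribute list above.

# A minimal sketch: the import path and keyword-override behaviour are
# assumed, not documented on this page.
from pytext.models.language_models.lmlstm import LMLSTM

config = LMLSTM.Config(
    tied_weights=True,  # share embedding and output-projection weights
    stateful=True,      # carry the LSTM hidden state across batches
)

# Nested module configs keep their documented defaults unless overridden,
# e.g. the unidirectional BiLSTM representation listed above.
print(config.representation)

# Note: weight tying is typically only valid when the embedding dimension
# matches the representation's output dimension; that constraint is checked
# when the model is built, not when the config object is constructed.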

Default JSON

{
    "inputs": {
        "tokens": {
            "is_input": true,
            "column": "text",
            "tokenizer": {
                "Tokenizer": {
                    "split_regex": "\\s+",
                    "lowercase": true,
                    "use_byte_offsets": false
                }
            },
            "add_bos_token": true,
            "add_eos_token": true,
            "use_eos_token_for_bos": false,
            "max_seq_len": null,
            "vocab": {
                "build_from_data": true,
                "size_from_data": 0,
                "min_counts": 0,
                "vocab_files": []
            },
            "vocab_file_delimiter": " "
        }
    },
    "embedding": {
        "load_path": null,
        "save_path": null,
        "freeze": false,
        "shared_module_key": null,
        "embed_dim": 100,
        "embedding_init_strategy": "random",
        "embedding_init_range": null,
        "embeddding_init_std": 0.02,
        "export_input_names": [
            "tokens_vals"
        ],
        "pretrained_embeddings_path": "",
        "vocab_file": "",
        "vocab_size": 0,
        "vocab_from_train_data": true,
        "vocab_from_all_data": false,
        "vocab_from_pretrained_embeddings": false,
        "lowercase_tokens": true,
        "min_freq": 1,
        "mlp_layer_dims": [],
        "padding_idx": null,
        "cpu_only": false,
        "skip_header": true,
        "delimiter": " "
    },
    "representation": {
        "BiLSTM": {
            "load_path": null,
            "save_path": null,
            "freeze": false,
            "shared_module_key": null,
            "dropout": 0.4,
            "lstm_dim": 32,
            "num_layers": 1,
            "bidirectional": false,
            "pack_sequence": true,
            "disable_sort_in_jit": false
        }
    },
    "decoder": {
        "load_path": null,
        "save_path": null,
        "freeze": false,
        "shared_module_key": null,
        "hidden_dims": [],
        "out_dim": null,
        "layer_norm": false,
        "dropout": 0.0,
        "bias": true,
        "activation": "relu",
        "temperature": 1.0,
        "spectral_normalization": false
    },
    "output_layer": {
        "load_path": null,
        "save_path": null,
        "freeze": false,
        "shared_module_key": null,
        "loss": {}
    },
    "tied_weights": false,
    "stateful": false,
    "caffe2_format": "predictor"
}
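When writing a training config, only the fields that differ from these defaults need to be spelled out; fields left out of the JSON fall back to the documented values. The snippet below is a standard-library sketch (not a PyText API call) of building such an override block before embedding it in a task config; the chosen values are illustrative only.

import json

# Override only what differs from the defaults documented above.
lmlstm_overrides = {
    "representation": {
        "BiLSTM": {
            "lstm_dim": 512,         # default is 32
            "num_layers": 2,         # default is 1
            "bidirectional": False,  # keep the default; see note below
        }
    },
    "stateful": True,                # default is False
}

print(json.dumps(lmlstm_overrides, indent=4))

Keeping bidirectional set to false matters for language modeling: a bidirectional encoder would let the representation see the future tokens it is supposed to predict, which is why the documented default is BiLSTM.Config(bidirectional=False).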