from fastai.gen_doc.nbdoc import *
from fastai.text import *
from fastai.text.models import *

show_doc(AWD_LSTM, title_level=3)
show_doc(AWD_LSTM.reset)
show_doc(Transformer, title_level=3)
show_doc(TransformerXL, title_level=3)
show_doc(TransformerXL.reset)
show_doc(LinearDecoder, title_level=3)
show_doc(PoolingLinearClassifier, title_level=3)
show_doc(PoolingLinearClassifier.pool)

show_doc(EmbeddingDropout, title_level=3)
# EmbeddingDropout zeroes entire embedding vectors (whole rows of the weight
# matrix) with probability 0.5, rescaling the surviving rows by 1/(1-0.5).
enc = nn.Embedding(100, 7, padding_idx=1)
enc_dp = EmbeddingDropout(enc, 0.5)
tst_input = torch.randint(0, 100, (8,))
enc_dp(tst_input)

show_doc(RNNDropout, title_level=3)
# RNNDropout draws one dropout mask per batch element and feature, then reuses
# that same mask at every step of the sequence dimension (dim 1 here).
dp = RNNDropout(0.3)
tst_input = torch.randn(3, 3, 7)
tst_input, dp(tst_input)

show_doc(WeightDropout, title_level=3)
# WeightDropout applies dropout to the hidden-to-hidden weights of the wrapped
# module. A raw copy of the weights is kept intact, and a dropped-out version
# is recomputed on each forward pass, as the second getattr call shows.
module = nn.LSTM(5, 2)
dp_module = WeightDropout(module, 0.4)
getattr(dp_module.module, 'weight_hh_l0')
tst_input = torch.randn(4, 20, 5)
h = (torch.zeros(1, 20, 2), torch.zeros(1, 20, 2))
x, h = dp_module(tst_input, h)
getattr(dp_module.module, 'weight_hh_l0')

show_doc(PositionalEncoding, title_level=3)
show_doc(DecoderLayer, title_level=3)
show_doc(MultiHeadAttention, title_level=3)
show_doc(MultiHeadRelativeAttention, title_level=3)
show_doc(SequentialRNN, title_level=3)
show_doc(SequentialRNN.reset)

show_doc(dropout_mask)
# Each mask entry is 0 with probability 0.3, otherwise 1/(1-0.3), so
# multiplying the input by the mask preserves its expected value.
tst_input = torch.randn(3, 3, 7)
dropout_mask(tst_input, (3, 7), 0.3)

show_doc(feed_forward)
show_doc(WeightDropout.forward)
show_doc(EmbeddingDropout.forward)
show_doc(RNNDropout.forward)
show_doc(WeightDropout.reset)
show_doc(PoolingLinearClassifier.forward)
show_doc(LinearDecoder.forward)
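The pieces documented above compose into a full language model. Below is a minimal sketch of that wiring, not the library's own `get_language_model` factory: it assumes the fastai v1 signatures `AWD_LSTM(vocab_sz, emb_sz, n_hid, n_layers, pad_token=1, ...)` and `LinearDecoder(n_out, n_hid, output_p, tie_encoder=None)`, and that the encoder consumes a `(batch, sequence)` tensor of token ids.

# A hedged sketch, not fastai's get_language_model factory: stack an AWD_LSTM
# encoder and a LinearDecoder in a SequentialRNN, tying the decoder weights to
# the encoder's embedding (the last LSTM layer outputs emb_sz features, so the
# shapes line up).
vocab_sz, emb_sz, n_hid, n_layers = 100, 7, 12, 2
encoder = AWD_LSTM(vocab_sz, emb_sz, n_hid, n_layers, pad_token=1)
decoder = LinearDecoder(vocab_sz, emb_sz, output_p=0.1, tie_encoder=encoder.encoder)
model = SequentialRNN(encoder, decoder)
model.reset()                               # zero the hidden state before a new text
ids = torch.randint(0, vocab_sz, (2, 5))    # (batch, sequence) token ids
decoded, raw_outputs, outputs = model(ids)  # decoded: (2, 5, vocab_sz) scores

Calling `model.reset()` between unrelated sequences is the point of `SequentialRNN`: it behaves like `nn.Sequential` but propagates `reset` to any stateful child, here clearing the AWD_LSTM hidden state.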