from fastai.gen_doc.nbdoc import *
from fastai.text import *

show_doc(language_model_learner)

jekyll_note("Using QRNN (change the flag in the config of the AWD LSTM) requires cuda to be installed (the same version as the one used by pytorch).")

path = untar_data(URLs.IMDB_SAMPLE)
data = TextLMDataBunch.from_csv(path, 'texts.csv')
learn = language_model_learner(data, AWD_LSTM, drop_mult=0.5)

show_doc(text_classifier_learner)

path = untar_data(URLs.IMDB_SAMPLE)
data = TextClasDataBunch.from_csv(path, 'texts.csv')
learn = text_classifier_learner(data, AWD_LSTM, drop_mult=0.5)

show_doc(RNNLearner)

show_doc(RNNLearner.get_preds)

show_doc(TextClassificationInterpretation, title_level=3)

import matplotlib.cm as cm

txt_ci = TextClassificationInterpretation.from_learner(learn)
test_text = "Zombiegeddon was perhaps the GREATEST movie i have ever seen!"
txt_ci.show_intrinsic_attention(test_text, cmap=cm.Purples)

txt_ci.intrinsic_attention(test_text)[1]

txt_ci.show_top_losses(5)

show_doc(RNNLearner.load_encoder)

show_doc(RNNLearner.save_encoder)

show_doc(RNNLearner.load_pretrained)

show_doc(convert_weights)

show_doc(LanguageLearner, title_level=3)

show_doc(LanguageLearner.predict)

show_doc(LanguageLearner.beam_search)

show_doc(get_language_model)

show_doc(get_text_classifier)

show_doc(MultiBatchEncoder.forward)

show_doc(LanguageLearner.show_results)

show_doc(MultiBatchEncoder.concat)

show_doc(MultiBatchEncoder)

show_doc(decode_spec_tokens)

show_doc(MultiBatchEncoder.reset)
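Taken together, these pieces form the usual transfer-learning workflow for text: fine-tune the pretrained language model on the target corpus, save its encoder with save_encoder, then load that encoder into a classifier with load_encoder. The sketch below (continuing from the imports and IMDB sample above) is one illustrative way to run it; the epoch counts, learning rates, and the encoder name 'ft_enc' are arbitrary choices, not anything prescribed by the library.

path = untar_data(URLs.IMDB_SAMPLE)

# Fine-tune the pretrained AWD_LSTM language model on the target texts
data_lm = TextLMDataBunch.from_csv(path, 'texts.csv')
learn_lm = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.5)
learn_lm.fit_one_cycle(1, 1e-2)
learn_lm.save_encoder('ft_enc')  # 'ft_enc' is an arbitrary file name

# Build the classifier on the same vocabulary and reuse the fine-tuned encoder
data_clas = TextClasDataBunch.from_csv(path, 'texts.csv', vocab=data_lm.train_ds.vocab)
learn_clas = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn_clas.load_encoder('ft_enc')
learn_clas.fit_one_cycle(1, 1e-2)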
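RNNLearner.get_preds returns predictions and targets for a whole dataset split. Since the text classifier batches samples roughly by length, passing ordered=True restores the original dataset order. A minimal sketch, assuming the learn_clas classifier from the sketch above:

preds, targets = learn_clas.get_preds(ds_type=DatasetType.Valid, ordered=True)
pred_classes = preds.argmax(dim=-1)  # predicted class index per validation document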
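LanguageLearner.predict generates text one word at a time from a prompt (optionally sampling with a temperature), while LanguageLearner.beam_search keeps several candidate continuations and returns the highest-scoring one. A short sketch, assuming the fine-tuned learn_lm above; the prompt, n_words, temperature, and beam_sz values are arbitrary:

# Sampled generation: continue the prompt by 50 words
learn_lm.predict("This movie was", n_words=50, temperature=0.75)

# Beam search over continuations (slower, often more coherent)
learn_lm.beam_search("This movie was", n_words=50, beam_sz=200)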