#!/usr/bin/env python
# coding: utf-8

# # Creating Text-Fabric from GBI trees (XML nodes)

# The source data for the conversion are the XML node files representing the Macula Greek version of the Nestle 1904 Greek New Testament. The most recent source data can be found on GitHub: https://github.com/Clear-Bible/macula-greek/tree/main/Nestle1904/nodes. Attribution: "MACULA Greek Linguistic Datasets, available at https://github.com/Clear-Bible/macula-greek/".
#
# The production of the Text-Fabric files consists of two steps: first the creation of pickle files (Part 1), then the actual Text-Fabric creation process (Part 2). Both steps are independent, which allows starting from Part 2 by using the pickle files as input.
#
# Be advised that this Text-Fabric version is a test version (proof of concept) and requires further fine-tuning, especially with regard to the nomenclature and presentation of (sub)phrases and clauses.

# ## Table of contents
# * [Part 1: Read XML data and store in pickle](#first-bullet)
# * [Part 2: Nestle1904 production from pickle input](#second-bullet)
# * [Part 3: Testing the created Text-Fabric data](#third-bullet)

# ## Part 1: Read XML data and store in pickle
# ##### [back to TOC](#TOC)
#
# This script harvests all information from the GBI tree data (XML nodes), puts it into a pandas DataFrame and stores the result per book in a pickle file. Note: pickling (in Python) is serialising an object into a disk file (or buffer).
#
# In the context of this script, 'leaf' refers to a node that contains the Greek word as data and has no child nodes (hence the analogy with the leaves on a tree). These 'leaves' can also be referred to as 'terminal nodes'. Further, Parent1 is the leaf's parent, Parent2 is Parent1's parent, etc.
#
# For a full description of the source data, see the document [MACULA Greek Treebank for the Nestle 1904 Greek New Testament.pdf](https://github.com/Clear-Bible/macula-greek/blob/main/doc/MACULA%20Greek%20Treebank%20for%20the%20Nestle%201904%20Greek%20New%20Testament.pdf)

# ### Step 1: import various libraries

# In[ ]:

import pandas as pd
import sys
import os
import time
import pickle
import re  # regular expressions
from os import listdir
from os.path import isfile, join
import xml.etree.ElementTree as ET

# ### Step 2: initialize global data
#
# Change BaseDir, InputDir and OutputDir to match the location of the data and the OS used.
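# The cell below is an optional, platform-neutral way to define these directories (a sketch added for illustration, not part of the original notebook; the folder names are assumptions to be adapted to your own layout). The next cell overrides these values with the author's Windows-specific paths.

# In[ ]:

# Platform-neutral path setup (illustrative sketch; overridden by the next cell)
from pathlib import Path

BaseDir = Path.home() / 'my_new_Jupyter_folder' / 'test_of_xml_etree'
InputDir = BaseDir / 'inputfiles'
OutputDir = BaseDir / 'outputfiles'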
# In[4]:

BaseDir = 'C:\\Users\\tonyj\\my_new_Jupyter_folder\\test_of_xml_etree\\'
InputDir = BaseDir + 'inputfiles\\'
OutputDir = BaseDir + 'outputfiles\\'

# key: filename; value: [0]=book_long, [1]=book_num, [2]=book_short
bo2book = {'01-matthew': ['Matthew', '1', 'Matt'],
           '02-mark': ['Mark', '2', 'Mark'],
           '03-luke': ['Luke', '3', 'Luke'],
           '04-john': ['John', '4', 'John'],
           '05-acts': ['Acts', '5', 'Acts'],
           '06-romans': ['Romans', '6', 'Rom'],
           '07-1corinthians': ['I_Corinthians', '7', '1Cor'],
           '08-2corinthians': ['II_Corinthians', '8', '2Cor'],
           '09-galatians': ['Galatians', '9', 'Gal'],
           '10-ephesians': ['Ephesians', '10', 'Eph'],
           '11-philippians': ['Philippians', '11', 'Phil'],
           '12-colossians': ['Colossians', '12', 'Col'],
           '13-1thessalonians': ['I_Thessalonians', '13', '1Thess'],
           '14-2thessalonians': ['II_Thessalonians', '14', '2Thess'],
           '15-1timothy': ['I_Timothy', '15', '1Tim'],
           '16-2timothy': ['II_Timothy', '16', '2Tim'],
           '17-titus': ['Titus', '17', 'Titus'],
           '18-philemon': ['Philemon', '18', 'Phlm'],
           '19-hebrews': ['Hebrews', '19', 'Heb'],
           '20-james': ['James', '20', 'Jas'],
           '21-1peter': ['I_Peter', '21', '1Pet'],
           '22-2peter': ['II_Peter', '22', '2Pet'],
           '23-1john': ['I_John', '23', '1John'],
           '24-2john': ['II_John', '24', '2John'],
           '25-3john': ['III_John', '25', '3John'],
           '26-jude': ['Jude', '26', 'Jude'],
           '27-revelation': ['Revelation', '27', 'Rev']}

# ### Step 3: define a function to add parent info to each node of the XML tree
#
# In order to traverse from the 'leaves' (terminal nodes) up to the root of the tree, it is required to add information to each node pointing to its parent.
#
# (concept taken from https://stackoverflow.com/questions/2170610/access-elementtree-node-parent-node)

# In[3]:

def addParentInfo(et):
    # recursively store on each child a reference to its parent element
    for child in et:
        child.attrib['parent'] = et
        addParentInfo(child)

def getParent(et):
    if 'parent' in et.attrib:
        return et.attrib['parent']
    else:
        return None
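# The next cell is a minimal demonstration (an added sketch, not part of the original conversion) of how these two helpers allow walking from a leaf up to the root. The toy XML fragment is made up for illustration; it is not actual MACULA data.

# In[ ]:

# Toy tree: Sentence > Node(CL) > Node(np) > w (the leaf with the Greek word)
toy = ET.fromstring('<Sentence><Node Cat="CL"><Node Cat="np"><w>λόγος</w></Node></Node></Sentence>')
addParentInfo(toy)
node = toy.find('.//w')             # the leaf carrying the Greek word
steps = 0
while getParent(node) is not None:  # climb until the root (which has no parent)
    node = getParent(node)
    steps += 1
print(f'leaf sits {steps} levels below the root')  # prints: leaf sits 3 levels below the root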
# ### Step 4: read and process the XML data and store pandas DataFrame in pickle

# In[7]:

# set some globals
monad = 1
CollectedItems = 0

# process books in order
for bo, bookinfo in bo2book.items():
    CollectedItems = 0
    full_df = pd.DataFrame({})
    book_long = bookinfo[0]
    booknum = bookinfo[1]
    book_short = bookinfo[2]
    InputFile = os.path.join(InputDir, f'{bo}.xml')
    OutputFile = os.path.join(OutputDir, f'{bo}.pkl')
    print(f'Processing {book_long} at {InputFile}')

    # send xml document to parsing process
    tree = ET.parse(InputFile)
    # now add all the parent info to the nodes in the tree [important!]
    addParentInfo(tree.getroot())
    start_time = time.time()

    # walk over all the leaves and harvest the data
    for elem in tree.iter():
        if not list(elem):  # if no child elements, this is a leaf/terminal node
            # show progress on screen
            CollectedItems += 1
            if (CollectedItems % 100 == 0):
                print(".", end='')
            # Leafref will contain a list with book, chapter, verse and word number
            Leafref = re.sub(r'[!: ]', " ", elem.attrib.get('ref')).split()
            # push value for monad to element tree
            elem.set('monad', monad)
            monad += 1
            # add some important computed data to the leaf
            elem.set('LeafName', elem.tag)
            elem.set('word', elem.text)
            elem.set('book_long', book_long)
            elem.set('booknum', int(booknum))
            elem.set('book_short', book_short)
            elem.set('chapter', int(Leafref[1]))
            elem.set('verse', int(Leafref[2]))
            # the following code walks up the parent chain to the root of the
            # tree and stores the attributes found along the way
            parentnode = getParent(elem)
            index = 0
            while (parentnode):
                index += 1
                elem.set('Parent{}Name'.format(index), parentnode.tag)
                elem.set('Parent{}Type'.format(index), parentnode.attrib.get('Type'))
                elem.set('Parent{}Cat'.format(index), parentnode.attrib.get('Cat'))
                elem.set('Parent{}Start'.format(index), parentnode.attrib.get('Start'))
                elem.set('Parent{}End'.format(index), parentnode.attrib.get('End'))
                elem.set('Parent{}Rule'.format(index), parentnode.attrib.get('Rule'))
                elem.set('Parent{}Head'.format(index), parentnode.attrib.get('Head'))
                elem.set('Parent{}NodeId'.format(index), parentnode.attrib.get('nodeId'))
                elem.set('Parent{}ClType'.format(index), parentnode.attrib.get('ClType'))
                elem.set('Parent{}HasDet'.format(index), parentnode.attrib.get('HasDet'))
                currentnode = parentnode
                parentnode = getParent(currentnode)
            elem.set('parents', int(index))
            # this will push all elements found in the tree into a DataFrame
            df = pd.DataFrame(elem.attrib, index={monad})
            full_df = pd.concat([full_df, df])

    # store the resulting DataFrame per book into a pickle file for further processing
    full_df = full_df.convert_dtypes(convert_string=True)
    output = open(r"{}".format(OutputFile), 'wb')
    pickle.dump(full_df, output)
    output.close()
    print("\nFound ", CollectedItems, " items in %s seconds\n" % (time.time() - start_time))
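# The cell below is a quick sanity check (an added sketch, not part of the original notebook): it loads one of the pickle files just written and inspects a few of the harvested columns. It assumes the Matthew file was produced by the cell above.

# In[ ]:

# load one pickle back and verify the DataFrame round-trips correctly
with open(os.path.join(OutputDir, '01-matthew.pkl'), 'rb') as pkl_file:
    test_df = pickle.load(pkl_file)
print(test_df.shape)                                 # (number of words, number of attributes)
print(test_df[['word', 'chapter', 'verse']].head())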
# ## Part 2: Nestle1904 Text-Fabric production from pickle input
# ##### [back to TOC](#TOC)
#
# This script creates the Text-Fabric files by means of the TF walker function.
# API info: https://annotation.github.io/text-fabric/tf/convert/walker.html
#
# The pickle files created in Part 1 are stored on GitHub location https://github.com/tonyjurg/NA1904/tree/main/resources/picklefiles

# ### Step 1: load libraries and initialize some data
#
# Change BaseDir, InputDir and OutputDir to match the location of the data and the OS used.

# In[29]:

import pandas as pd
import os
import re
import gc
from tf.fabric import Fabric
from tf.convert.walker import CV
from tf.parameters import VERSION
from datetime import date
import pickle

BaseDir = 'C:\\Users\\tonyj\\my_new_Jupyter_folder\\test_of_xml_etree\\'
source_dir = BaseDir + 'outputfiles\\'    # the input for the walker is the pickle output of Part 1
output_dir = BaseDir + 'outputfilesTF\\'  # the Text-Fabric files
# note: the next line overrides the previous one, so the TF files
# end up in the local text-fabric-data tree
output_dir = 'C:\\text-fabric-data\\github\\tjurg\\NA1904\\tf\\1904'

# key: filename; value: [0]=book_long, [1]=book_num, [2]=book_short
bo2book = {'01-matthew': ['Matthew', '1', 'Matt'],
           '02-mark': ['Mark', '2', 'Mark'],
           '03-luke': ['Luke', '3', 'Luke'],
           '04-john': ['John', '4', 'John'],
           '05-acts': ['Acts', '5', 'Acts'],
           '06-romans': ['Romans', '6', 'Rom'],
           '07-1corinthians': ['I_Corinthians', '7', '1Cor'],
           '08-2corinthians': ['II_Corinthians', '8', '2Cor'],
           '09-galatians': ['Galatians', '9', 'Gal'],
           '10-ephesians': ['Ephesians', '10', 'Eph'],
           '11-philippians': ['Philippians', '11', 'Phil'],
           '12-colossians': ['Colossians', '12', 'Col'],
           '13-1thessalonians': ['I_Thessalonians', '13', '1Thess'],
           '14-2thessalonians': ['II_Thessalonians', '14', '2Thess'],
           '15-1timothy': ['I_Timothy', '15', '1Tim'],
           '16-2timothy': ['II_Timothy', '16', '2Tim'],
           '17-titus': ['Titus', '17', 'Titus'],
           '18-philemon': ['Philemon', '18', 'Phlm'],
           '19-hebrews': ['Hebrews', '19', 'Heb'],
           '20-james': ['James', '20', 'Jas'],
           '21-1peter': ['I_Peter', '21', '1Pet'],
           '22-2peter': ['II_Peter', '22', '2Pet'],
           '23-1john': ['I_John', '23', '1John'],
           '24-2john': ['II_John', '24', '2John'],
           '25-3john': ['III_John', '25', '3John'],
           '26-jude': ['Jude', '26', 'Jude'],
           '27-revelation': ['Revelation', '27', 'Rev']}

# ### Step 2: running the TF walker function
#
# API info: https://annotation.github.io/text-fabric/tf/convert/walker.html
#
# The logic of interpreting the data is included in the director function.

# In[28]:

TF = Fabric(locations=output_dir, silent=False)
cv = CV(TF)
version = "0.1 (Initial)"

def sanitize(input):
    # pandas may deliver missing values as float('nan'); map those to an
    # empty string so Text-Fabric receives clean string features
    if isinstance(input, float):
        return ''
    else:
        return (input)
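# Added illustration (not in the original notebook): sanitize() maps the
# float('nan') that pandas uses for missing values to an empty string and
# passes everything else through unchanged.
assert sanitize(float('nan')) == ''
assert sanitize('Nominative') == 'Nominative'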
def director(cv):
    NoneType = type(None)  # needed as a tool to validate certain data
    prev_book = "Matthew"  # start at first book
    IndexDict = {}         # init an empty dictionary

    for bo, bookinfo in bo2book.items():
        '''
        load all data into a dataframe
        process books in order (bookinfo is a list!)
        '''
        book = bookinfo[0]
        booknum = int(bookinfo[1])
        book_short = bookinfo[2]
        book_loc = os.path.join(source_dir, f'{bo}.pkl')
        print(f'\tloading {book_loc}...')
        pkl_file = open(book_loc, 'rb')
        df = pickle.load(pkl_file)
        pkl_file.close()

        FoundWords = 0
        phrasefunction = 'TBD'
        phrasefunction_long = 'TBD'
        this_clausetype = "unknown"  # just signal a not-found case
        this_clauserule = "unknown"
        phrasetype = "unknown"       # just signal a not-found case
        prev_chapter = int(1)   # start at 1
        prev_verse = int(1)     # start at 1
        prev_sentence = int(1)  # start at 1
        prev_clause = int(1)    # start at 1
        prev_phrase = int(1)    # start at 1

        # reset/load the following initial variables (we are at the start of a new book)
        sentence_track = 1
        sentence_done = False
        clause_track = 1
        clause_done = False
        phrase_track = 1
        phrase_done = False
        verse_done = False
        chapter_done = False
        book_done = False
        wrdnum = 0  # start at 0

        # fill dictionary of column names for this book; itertuples() yields the
        # row index at position 0, so the first data column sits at position 1
        ItemsInRow = 1
        for itemname in df.columns.to_list():
            IndexDict.update({'i_{}'.format(itemname): ItemsInRow})
            ItemsInRow += 1

        '''
        Walks through the texts and triggers slot and node creation events.
        '''
        # iterate through words and construct objects
        for row in df.itertuples():
            wrdnum += 1
            FoundWords += 1

            '''
            First get all the relevant information from the dataframe
            '''
            # get number of parent nodes
            parents = row[IndexDict.get("i_parents")]
            # get chapter and verse from the data
            chapter = row[IndexDict.get("i_chapter")]
            verse = row[IndexDict.get("i_verse")]

            # get clause type info
            for i in range(1, parents - 1):
                item = IndexDict.get("i_Parent{}Cat".format(i))
                if row[item] == "CL":
                    clauseparent = i
                    prev_clausetype = this_clausetype
                    _rule = "i_Parent{}Rule".format(i)
                    this_clausetype = row[IndexDict.get(_rule)]

            # get phrase type info
            prev_phrasetype = phrasetype
            for i in range(1, parents - 1):
                item = IndexDict.get("i_Parent{}Cat".format(i))
                if row[item] == "np":
                    _item = "i_Parent{}Rule".format(i)
                    phrasetype = row[IndexDict.get(_item)]
                    break

            functionaltag = row[IndexDict.get('i_FunctionalTag')]

            '''
            determine if conditions are met to trigger some action
            action will be executed after next word
            '''
            # detect book boundary
            if prev_book != book:
                prev_book = book
                book_done = True
                chapter_done = True
                verse_done = True
                sentence_done = True
                clause_done = True
                phrase_done = True

            # detect chapter boundary
            if prev_chapter != chapter:
                chapter_done = True
                verse_done = True
                sentence_done = True
                clause_done = True
                phrase_done = True

            # detect verse boundary
            if prev_verse != verse:
                verse_done = True

            # determine syntactic categories of clause parts. See also the description in
            # "MACULA Greek Treebank for the Nestle 1904 Greek New Testament.pdf" pages 5 and 6
            # (section 2.4 Syntactic Categories at Clause Level)
            prev_phrasefunction = phrasefunction
            prev_phrasefunction_long = phrasefunction_long
            phrase_done = False
            for i in range(1, clauseparent):
                phrasefunction = row[IndexDict.get("i_Parent{}Cat".format(i))]
                if phrasefunction == "ADV":
                    phrasefunction_long = 'Adverbial function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == "IO":
                    phrasefunction_long = 'Indirect Object function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == "O":
                    phrasefunction_long = 'Object function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == "O2":
                    phrasefunction_long = 'Second Object function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == "S":
                    phrasefunction_long = 'Subject function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == 'P':
                    phrasefunction_long = 'Predicate function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == "V":
                    phrasefunction_long = 'Verbal function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
                elif phrasefunction == "VC":
                    phrasefunction_long = 'Verbal Copula function'
                    if prev_phrasefunction != phrasefunction:
                        phrase_done = True
                    break
            # determine syntactic categories at word level. See also the description in
            # "MACULA Greek Treebank for the Nestle 1904 Greek New Testament.pdf" pages 6 and 7
            # (section 2.2 Syntactic Categories at Word Level: Part of Speech Labels)
            sp = sanitize(row[IndexDict.get("i_Cat")])
            if sp == 'adj':
                sp_full = 'adjective'
            elif sp == 'conj':
                sp_full = 'conjunction'
            elif sp == 'det':
                sp_full = 'determiner'
            elif sp == 'intj':
                sp_full = 'interjection'
            elif sp == 'noun':
                sp_full = 'noun'
            elif sp == 'num':
                sp_full = 'numeral'
            elif sp == 'prep':
                sp_full = 'preposition'
            elif sp == 'ptcl':
                sp_full = 'particle'
            elif sp == 'pron':
                sp_full = 'pronoun'
            elif sp == 'verb':
                sp_full = 'verb'

            # manage first word per book
            if wrdnum == 1:
                prev_phrasetype = phrasetype
                prev_phrasefunction = phrasefunction
                prev_phrasefunction_long = phrasefunction_long
                book_done = False
                chapter_done = False
                verse_done = False
                phrase_done = False
                clause_done = False
                sentence_done = False
                # create the first set of nodes
                this_book = cv.node('book')
                cv.feature(this_book, book=prev_book)
                this_chapter = cv.node('chapter')
                this_verse = cv.node('verse')
                this_sentence = cv.node('sentence')
                this_clause = cv.node('clause')
                this_phrase = cv.node('phrase')
                sentence_track += 1
                clause_track += 1
                phrase_track += 1

            '''
            -- handle TF events --
            Determine what actions need to be done if the proper condition is met.
            '''
            # act upon end of phrase (close)
            if phrase_done or clause_done:
                cv.feature(this_phrase,
                           phrase=prev_phrase,
                           phrasetype=prev_phrasetype,
                           phrasefunction=prev_phrasefunction,
                           phrasefunction_long=prev_phrasefunction_long)
                cv.terminate(this_phrase)

            # act upon end of clause (close)
            if clause_done:
                cv.feature(this_clause, clause=prev_clause, clausetype=prev_clausetype)
                cv.terminate(this_clause)

            # act upon end of sentence (close)
            if sentence_done:
                cv.feature(this_sentence, sentence=prev_sentence)
                cv.terminate(this_sentence)

            # act upon end of verse (close)
            if verse_done:
                cv.feature(this_verse, verse=prev_verse)
                cv.terminate(this_verse)
                prev_verse = verse

            # act upon end of chapter (close)
            if chapter_done:
                cv.feature(this_chapter, chapter=prev_chapter)
                cv.terminate(this_chapter)
                prev_chapter = chapter

            # act upon end of book (close and open new)
            if book_done:
                cv.terminate(this_book)
                this_book = cv.node('book')
                cv.feature(this_book, book=book)
                prev_book = book
                wrdnum = 1
                phrase_track = 1
                clause_track = 1
                sentence_track = 1
                book_done = False

            # start of chapter (create new)
            if chapter_done:
                this_chapter = cv.node('chapter')
                chapter_done = False

            # start of verse (create new)
            if verse_done:
                this_verse = cv.node('verse')
                verse_done = False

            # start of sentence (create new)
            if sentence_done:
                this_sentence = cv.node('sentence')
                prev_sentence = sentence_track
                sentence_track += 1
                sentence_done = False

            # start of clause (create new)
            if clause_done:
                this_clause = cv.node('clause')
                prev_clause = clause_track
                clause_track += 1
                clause_done = False
                phrase_done = True

            # start of phrase (create new)
            if phrase_done:
                this_phrase = cv.node('phrase')
                prev_phrase = phrase_track
                prev_phrasefunction = phrasefunction
                prev_phrasefunction_long = phrasefunction_long
                phrase_track += 1
                phrase_done = False

            # detect boundaries of sentences, clauses and phrases
            text = row[IndexDict.get("i_Unicode")]
            if text[-1:] == ".":
                sentence_done = True
                clause_done = True
                phrase_done = True
            if text[-1:] == ";" or text[-1:] == ",":
                clause_done = True
                phrase_done = True
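            # Note on the heuristic above (added comment): in Greek text a ';' serves
            # as the question mark and the raised dot '·' (ano teleia) marks a major
            # break. Neither is treated as sentence-final here, so depending on how
            # the Unicode source encodes punctuation, some sentence endings may be
            # registered only as clause endings.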
            '''
            -- create word nodes --
            '''
            # some attributes are not present inside some (small) books;
            # the following is to prevent exceptions
            degree = ''
            if 'i_Degree' in IndexDict:
                degree = sanitize(row[IndexDict.get("i_Degree")])
            subjref = ''
            if 'i_SubjRef' in IndexDict:
                subjref = sanitize(row[IndexDict.get("i_SubjRef")])

            # make word object
            this_word = cv.slot()
            cv.feature(this_word,
                       word=row[IndexDict.get("i_Unicode")],
                       monad=row[IndexDict.get("i_monad")],
                       orig_order=row[IndexDict.get("i_monad")],
                       book_long=row[IndexDict.get("i_book_long")],
                       booknum=booknum,
                       book_short=row[IndexDict.get("i_book_short")],
                       chapter=chapter,
                       sp=sp,
                       sp_full=sp_full,
                       verse=verse,
                       sentence=prev_sentence,
                       clause=prev_clause,
                       phrase=prev_phrase,
                       normalized=sanitize(row[IndexDict.get("i_NormalizedForm")]),
                       formaltag=sanitize(row[IndexDict.get("i_FormalTag")]),
                       functionaltag=functionaltag,
                       strongs=sanitize(row[IndexDict.get("i_StrongNumber")]),
                       lex_dom=sanitize(row[IndexDict.get("i_LexDomain")]),
                       ln=sanitize(row[IndexDict.get("i_LN")]),
                       gloss_EN=sanitize(row[IndexDict.get("i_Gloss")]),
                       gn=sanitize(row[IndexDict.get("i_Gender")]),
                       nu=sanitize(row[IndexDict.get("i_Number")]),
                       case=sanitize(row[IndexDict.get("i_Case")]),
                       lemma=sanitize(row[IndexDict.get("i_UnicodeLemma")]),
                       person=sanitize(row[IndexDict.get("i_Person")]),
                       mood=sanitize(row[IndexDict.get("i_Mood")]),
                       tense=sanitize(row[IndexDict.get("i_Tense")]),
                       number=sanitize(row[IndexDict.get("i_Number")]),
                       voice=sanitize(row[IndexDict.get("i_Voice")]),
                       degree=degree,
                       type=sanitize(row[IndexDict.get("i_Type")]),
                       reference=sanitize(row[IndexDict.get("i_Ref")]),  # the capital R is critical here!
                       subj_ref=subjref,
                       nodeID=row[1]  # this is a fixed position
                       )
            cv.terminate(this_word)

        '''
        -- wrap up the book --
        '''
        # close all nodes (phrase, clause, sentence, verse, chapter and book)
        cv.feature(this_phrase,
                   phrase=phrase_track,
                   phrasetype=prev_phrasetype,
                   phrasefunction=prev_phrasefunction,
                   phrasefunction_long=prev_phrasefunction_long)
        cv.terminate(this_phrase)
        cv.feature(this_clause, clause=prev_clause, clausetype=prev_clausetype)
        cv.terminate(this_clause)
        cv.feature(this_sentence, sentence=prev_sentence)
        cv.terminate(this_sentence)
        cv.feature(this_verse, verse=prev_verse)
        cv.terminate(this_verse)
        cv.feature(this_chapter, chapter=prev_chapter)
        cv.terminate(this_chapter)
        cv.feature(this_book, book=prev_book)
        cv.terminate(this_book)

        # clear dataframe for this book
        del df
        # clear the index dictionary
        IndexDict.clear()
        gc.collect()

'''
-- output definitions --
'''
slotType = 'word'  # or whatever you choose

otext = {  # dictionary of config data for sections and text formats
    'fmt:text-orig-full': '{word}',
    'sectionTypes': 'book,chapter,verse',
    'sectionFeatures': 'book,chapter,verse',
    'structureFeatures': 'book,chapter,verse',
    'structureTypes': 'book,chapter,verse',
}

# configure metadata
generic = {  # dictionary of metadata meant for all features
    'Name': 'Greek New Testament (NA1904)',
    'Version': '1904',
    'Editors': 'Nestle & Aland',
    'Data source': 'MACULA Greek Linguistic Datasets, available at https://github.com/Clear-Bible/macula-greek/tree/main/Nestle1904/nodes',
    'Availability': 'Creative Commons Attribution 4.0 International (CC BY 4.0)',
    'Converter_author': 'Tony Jurg, Vrije Universiteit Amsterdam, Netherlands',
    'Converter_execution': 'Tony Jurg, Vrije Universiteit Amsterdam, Netherlands',
    'Converter_source': 'https://github.com/tonyjurg/NA1904/tree/main/resources/converter',
    'Converter_version': '{}'.format(version),
    'TextFabric version': '{}'.format(VERSION)  # imported from tf.parameters
}

intFeatures = {  # set of integer valued feature names
    'booknum',
    'chapter',
    'verse',
    'sentence',
    'clause',
    'phrase',
    'orig_order',
    'monad'
}

featureMeta = {  # per-feature dicts with metadata
    'book': {'description': 'Book'},
    'book_long': {'description': 'Book name (fully spelled out)'},
    'booknum': {'description': 'NT book number (Matthew=1, Mark=2, ..., Revelation=27)'},
    'book_short': {'description': 'Book name (abbreviated)'},
    'chapter': {'description': 'Chapter number inside book'},
    'verse': {'description': 'Verse number inside chapter'},
    'sentence': {'description': 'Sentence number (counted per chapter)'},
    'clause': {'description': 'Clause number (counted per chapter)'},
    'clausetype': {'description': 'Clause type information (verb, verbless, elided, minor, etc.)'},
    'phrase': {'description': 'Phrase number (counted per chapter)'},
    'phrasetype': {'description': 'Phrase type information'},
    'phrasefunction': {'description': 'Phrase function (abbreviated)'},
    'phrasefunction_long': {'description': 'Phrase function (long description)'},
    'orig_order': {'description': 'Word order within corpus'},
    'monad': {'description': 'Monad'},
    'word': {'description': 'Word as it appears in the text'},
    'sp': {'description': 'Part of Speech (abbreviated)'},
    'sp_full': {'description': 'Part of Speech (long description)'},
    'normalized': {'description': 'Surface word stripped of punctuation'},
    'lemma': {'description': 'Lexeme (lemma)'},
    'formaltag': {'description': 'Formal tag (Sandborg-Petersen morphology)'},
    'functionaltag': {'description': 'Functional tag (Sandborg-Petersen morphology)'},
    # see also the discussion on the relation between lex_dom and ln at
    # https://github.com/Clear-Bible/macula-greek/issues/29
    'lex_dom': {'description': 'Lexical domain according to Semantic Dictionary of Biblical Greek, SDBG (not present everywhere?)'},
    'ln': {'description': 'Louw-Nida lexical classification (not present everywhere?)'},
    'strongs': {'description': 'Strongs number'},
    'gloss_EN': {'description': 'English gloss'},
    'gn': {'description': 'Grammatical gender (Masculine, Feminine, Neuter)'},
    'nu': {'description': 'Grammatical number (Singular, Plural)'},
    'case': {'description': 'Grammatical case (Nominative, Genitive, Dative, Accusative, Vocative)'},
    'person': {'description': 'Grammatical person of the verb (first, second, third)'},
    'mood': {'description': 'Grammatical mood of the verb (indicative, subjunctive, imperative, etc.)'},
    'tense': {'description': 'Grammatical tense of the verb (e.g. Present, Aorist)'},
    'number': {'description': 'Grammatical number of the verb'},
    'voice': {'description': 'Grammatical voice of the verb'},
    'degree': {'description': 'Degree (e.g. Comparative, Superlative)'},
    'type': {'description': 'Grammatical type of noun or pronoun (e.g. Common, Personal)'},
    'reference': {'description': 'Reference (to nodeID in XML source data, not yet post-processed)'},
    'subj_ref': {'description': 'Subject reference (to nodeID in XML source data, not yet post-processed)'},
    'nodeID': {'description': 'Node ID (as in the XML source data, not yet post-processed)'}
}
'''
-- the main function --
'''
good = cv.walk(
    director,
    slotType,
    otext=otext,
    generic=generic,
    intFeatures=intFeatures,
    featureMeta=featureMeta,
    warn=True,
    force=False
)

if good:
    print("done")

# ## Part 3: Testing the created Text-Fabric data
# ##### [back to TOC](#TOC)

# ### Step 1: load the TF data
#
# The TF data will be loaded from a local copy of the GitHub repository.

# In[29]:

get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

# In[2]:

# First load the various modules used for analyzing the data and for plotting
import sys, os, collections
import pandas as pd
import numpy as np
import re
from tf.fabric import Fabric
from tf.app import use

# The following cell loads the Text-Fabric files from a local disc. Change accordingly.

# In[3]:

# Loading the New Testament Text-Fabric data (from local disk)
NA = use("tjurg/NA1904", checkData="clone", hoist=globals())

# ### Step 2: perform some basic display
#
# Note: the implementation with regard to how phrases need to be displayed (esp. with regard to conjunctions) is still to be done.

# In[12]:

Search0 = '''
book book=Matthew
   chapter chapter=1
      verse
'''
Search0 = NA.search(Search0)
NA.show(Search0, start=1, end=8, condensed=True,
        extraFeatures={'clausetype', 'sp_full', 'phrasetype', 'gloss_EN', 'person', 'tense',
                       'voice', 'number', 'gn', 'mood', 'phrasefunction_long'},
        withNodes=False)

# ### Step 3: dump some structure information

# In[33]:

T.structureInfo()

# In[13]:

T.up(232892)

# In[34]:

TF.features['otext'].metaData

# ## Running the Text-Fabric browser
# ##### [back to TOC](#TOC)

# In[16]:

get_ipython().system('text-fabric app')

# In[20]:

get_ipython().system('text-fabric app -k')
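# As a final spot check (an added sketch, not part of the original notebook), the section API can be used to fetch and print a single verse. This assumes the `use()` call above succeeded and hoisted `F` and `T` into the global namespace.

# In[ ]:

print(F.otype.slotType)                            # expected: 'word'
firstVerse = T.nodeFromSection(('Matthew', 1, 1))  # node of Matthew 1:1
print(T.text(firstVerse))                          # Greek text of that verse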