#!/usr/bin/env python
# coding: utf-8

# ## Creating Text-Fabric from LowFat XML trees
#
# Version: 0.3 (Jun 17, 2023)
#
# ## Table of content
# * [1. Introduction](#first-bullet)
# * [2. Read LowFat XML data and store in pickle](#second-bullet)
# * [3. Text-Fabric production from pickle input](#third-bullet)
# * [4. Basic testing of the Text-Fabric data](#fourth-bullet)

# ## 1. Introduction
# ##### [Back to TOC](#TOC)
#
# The source data for the conversion are the LowFat XML tree files representing the Macula Greek version of the Nestle 1904 Greek New Testament. The most recent source data can be found on GitHub: https://github.com/Clear-Bible/macula-greek/tree/main/Nestle1904/lowfat. Attribution: "MACULA Greek Linguistic Datasets, available at https://github.com/Clear-Bible/macula-greek/".
#
# The production of the Text-Fabric files consists of two steps: first the creation of pickle files (part 1), then the actual Text-Fabric creation process (part 2). Both steps are independent, which allows one to start from part 2 using the pickle files as input.
#
# Be advised that this Text-Fabric version is a test version (proof of concept) and requires further fine-tuning, especially with regard to the nomenclature and presentation of (sub)phrases and clauses.

# ## 2. Read LowFat XML data and store in pickle
# ##### [Back to TOC](#TOC)
#
# This script harvests all information from the LowFat tree data (XML nodes), puts it into a Pandas DataFrame and stores the result per book in a pickle file. Note: pickling (in Python) is serialising an object into a disk file (or buffer).
#
# In the context of this script, 'leaf' refers to a node containing the Greek word as data; these happen to be the nodes without any children (hence the analogy with the leaves of a tree). These 'leaves' can also be referred to as 'terminal nodes'. Further, Parent1 is the leaf's parent, Parent2 is Parent1's parent, etc.
#
# For a full description of the source data see document [MACULA Greek Treebank for the Nestle 1904 Greek New Testament.pdf](https://github.com/Clear-Bible/macula-greek/blob/main/doc/MACULA%20Greek%20Treebank%20for%20the%20Nestle%201904%20Greek%20New%20Testament.pdf)

# ### Step 1: import various libraries

# In[1]:


import pandas as pd
import sys
import os
import time
import pickle

import re  # regular expressions
from os import listdir
from os.path import isfile, join
import xml.etree.ElementTree as ET


# ### Step 2: initialize global data
#
# Change BaseDir, XmlDir and PklDir to match the location of the data and the OS used.

# In[29]:


BaseDir = 'C:\\Users\\tonyj\\my_new_Jupyter_folder\\Read_from_lowfat\\data\\'
XmlDir = BaseDir+'xml\\'
PklDir = BaseDir+'pkl\\'
XlsxDir = BaseDir+'xlsx\\'
# note: create the output directories prior to running this part

# key: filename, [0]=book_long, [1]=book_num, [2]=book_short
bo2book = {'01-matthew': ['Matthew', '1', 'Matt'],
           '02-mark': ['Mark', '2', 'Mark'],
           '03-luke': ['Luke', '3', 'Luke'],
           '04-john': ['John', '4', 'John'],
           '05-acts': ['Acts', '5', 'Acts'],
           '06-romans': ['Romans', '6', 'Rom'],
           '07-1corinthians': ['I_Corinthians', '7', '1Cor'],
           '08-2corinthians': ['II_Corinthians', '8', '2Cor'],
           '09-galatians': ['Galatians', '9', 'Gal'],
           '10-ephesians': ['Ephesians', '10', 'Eph'],
           '11-philippians': ['Philippians', '11', 'Phil'],
           '12-colossians': ['Colossians', '12', 'Col'],
           '13-1thessalonians': ['I_Thessalonians', '13', '1Thess'],
           '14-2thessalonians': ['II_Thessalonians', '14', '2Thess'],
           '15-1timothy': ['I_Timothy', '15', '1Tim'],
           '16-2timothy': ['II_Timothy', '16', '2Tim'],
           '17-titus': ['Titus', '17', 'Titus'],
           '18-philemon': ['Philemon', '18', 'Phlm'],
           '19-hebrews': ['Hebrews', '19', 'Heb'],
           '20-james': ['James', '20', 'Jas'],
           '21-1peter': ['I_Peter', '21', '1Pet'],
           '22-2peter': ['II_Peter', '22', '2Pet'],
           '23-1john': ['I_John', '23', '1John'],
           '24-2john': ['II_John', '24', '2John'],
           '25-3john': ['III_John', '25', '3John'],
           '26-jude': ['Jude', '26', 'Jude'],
           '27-revelation': ['Revelation', '27', 'Rev']}
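# The cell above notes that the output directories must exist before running the conversion. Below is a minimal, optional sketch (not part of the original workflow) that creates them, assuming the directory layout defined above.

# In[ ]:


# Optional: create the output directories if they do not exist yet
# (assumption: the PklDir/XlsxDir layout defined in the previous cell is used)
for OutDir in (PklDir, XlsxDir):
    os.makedirs(OutDir, exist_ok=True)
print(f'Output directories ready: {PklDir} and {XlsxDir}')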
# ### Step 3: define a function to add parent info to each node of the XML tree
#
# In order to traverse from the 'leaves' (terminal nodes) up to the root of the tree, it is necessary to add to each node information pointing to its parent.
#
# (concept taken from https://stackoverflow.com/questions/2170610/access-elementtree-node-parent-node)

# In[30]:


def addParentInfo(et):
    # attach a reference to the parent element to each child node (recursively)
    for child in et:
        child.attrib['parent'] = et
        addParentInfo(child)

def getParent(et):
    if 'parent' in et.attrib:
        return et.attrib['parent']
    else:
        return None
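# To illustrate the parent-pointer technique, the following optional cell runs addParentInfo on a tiny hand-made XML fragment and walks from a leaf back up to the root. The fragment is made up for illustration only; the real data uses the LowFat schema.

# In[ ]:


# Optional illustration of addParentInfo/getParent on a made-up XML fragment
demo = ET.fromstring('<sentence><wg><w>λόγος</w></wg></sentence>')
addParentInfo(demo)
leaf = demo.find('.//w')      # the terminal node ('leaf')
node = leaf
while node is not None:       # walk up: w -> wg -> sentence
    print(node.tag)
    node = getParent(node)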
# ### Step 4: read and process the XML data and store the Pandas DataFrame in pickle

# In[32]:


# set some globals
monad = 1
CollectedItems = 0

# process books in order
for bo, bookinfo in bo2book.items():
    CollectedItems = 0
    SentenceNumber = 0
    WordGroupNumber = 0
    full_df = pd.DataFrame({})
    book_long = bookinfo[0]
    booknum = bookinfo[1]
    book_short = bookinfo[2]
    InputFile = os.path.join(XmlDir, f'{bo}.xml')
    OutputFile = os.path.join(PklDir, f'{bo}.pkl')
    print(f'Processing {book_long} at {InputFile}')
    DataFrameList = []

    # send the XML document to the parsing process
    tree = ET.parse(InputFile)
    # now add all the parent info to the nodes in the tree [important!]
    addParentInfo(tree.getroot())
    start_time = time.time()

    # walk over all the XML data
    for elem in tree.iter():
        if elem.tag == 'sentence':
            # add a running number to the 'sentence' tags
            SentenceNumber += 1
            elem.set('SN', SentenceNumber)
        if elem.tag == 'wg':
            # add a running number to the 'wg' tags
            WordGroupNumber += 1
            elem.set('WGN', WordGroupNumber)
        if elem.tag == 'w':
            # all nodes containing words are tagged with 'w'
            # show progress on screen
            CollectedItems += 1
            if (CollectedItems % 100 == 0): print(".", end='')
            # Leafref will contain a list with book, chapter, verse and word number
            Leafref = re.sub(r'[!: ]', " ", elem.attrib.get('ref')).split()
            # push the value for monad into the element tree
            elem.set('monad', monad)
            monad += 1
            # add some important computed data to the leaf
            elem.set('LeafName', elem.tag)
            elem.set('word', elem.text)
            elem.set('book_long', book_long)
            elem.set('booknum', int(booknum))
            elem.set('book_short', book_short)
            elem.set('chapter', int(Leafref[1]))
            elem.set('verse', int(Leafref[2]))

            # the following code will trace the parents up the tree and store the attributes found
            parentnode = getParent(elem)
            index = 0
            while (parentnode):
                index += 1
                elem.set('Parent{}Name'.format(index), parentnode.tag)
                elem.set('Parent{}Type'.format(index), parentnode.attrib.get('type'))
                elem.set('Parent{}Appos'.format(index), parentnode.attrib.get('appositioncontainer'))
                elem.set('Parent{}Class'.format(index), parentnode.attrib.get('class'))
                elem.set('Parent{}Rule'.format(index), parentnode.attrib.get('rule'))
                elem.set('Parent{}Role'.format(index), parentnode.attrib.get('role'))
                elem.set('Parent{}Cltype'.format(index), parentnode.attrib.get('cltype'))
                elem.set('Parent{}Unit'.format(index), parentnode.attrib.get('unit'))
                elem.set('Parent{}Junction'.format(index), parentnode.attrib.get('junction'))
                elem.set('Parent{}SN'.format(index), parentnode.attrib.get('SN'))
                elem.set('Parent{}WGN'.format(index), parentnode.attrib.get('WGN'))
                currentnode = parentnode
                parentnode = getParent(currentnode)
            elem.set('parents', int(index))

            # this will add all elements found in the tree to a list of DataFrames
            DataFrameChunk = pd.DataFrame(elem.attrib, index=[monad])
            DataFrameList.append(DataFrameChunk)

    # store the resulting DataFrame per book into a pickle file for further processing
    full_df = pd.concat([df for df in DataFrameList])
    output = open(r"{}".format(OutputFile), 'wb')
    pickle.dump(full_df, output)
    output.close()
    print("\nFound ", CollectedItems, " items in %s seconds\n" % (time.time() - start_time))
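# As a quick check of what part 1 produced, the following optional cell loads a single pickle file and reports its dimensions and the first few column names. The choice of '01-matthew' is only an example; any of the books works.

# In[ ]:


# Optional sanity check on one of the pickle files produced above (example: Matthew)
CheckFile = os.path.join(PklDir, '01-matthew.pkl')
with open(CheckFile, 'rb') as pkl_file:
    check_df = pickle.load(pkl_file)
print(f'{CheckFile}: {check_df.shape[0]} words, {check_df.shape[1]} columns')
print(list(check_df.columns)[:10])   # show the first few harvested attributes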
# ## 3. Text-Fabric production from pickle input
# ##### [Back to TOC](#TOC)
#
# This script creates the Text-Fabric files by recursively calling the TF walker function.
# API info: https://annotation.github.io/text-fabric/tf/convert/walker.html
#
# The pickle files created by step 1 are stored on the GitHub location [resources/pickle](pickle/README.md).

# ### Step 1: Load libraries and initialize some data
#
# IMPORTANT: To ensure proper creation of the Text-Fabric files on your system, it is crucial to adjust the values of BaseDir, PklDir, etc. to match the location of the data and the operating system you are using. In this Jupyter Notebook, Windows is the operating system employed.

# In[3]:


import pandas as pd
import os
import re
import gc
from tf.fabric import Fabric
from tf.convert.walker import CV
from tf.parameters import VERSION
from datetime import date
import pickle

BaseDir = 'C:\\Users\\tonyj\\my_new_Jupyter_folder\\Read_from_lowfat\\data\\'
XmlDir = BaseDir+'xml\\'
PklDir = BaseDir+'pkl\\'
XlsxDir = BaseDir+'xlsx\\'

# key: filename, [0]=book_long, [1]=book_num, [2]=book_short
bo2book = {'01-matthew': ['Matthew', '1', 'Matt'],
           '02-mark': ['Mark', '2', 'Mark'],
           '03-luke': ['Luke', '3', 'Luke'],
           '04-john': ['John', '4', 'John'],
           '05-acts': ['Acts', '5', 'Acts'],
           '06-romans': ['Romans', '6', 'Rom'],
           '07-1corinthians': ['I_Corinthians', '7', '1Cor'],
           '08-2corinthians': ['II_Corinthians', '8', '2Cor'],
           '09-galatians': ['Galatians', '9', 'Gal'],
           '10-ephesians': ['Ephesians', '10', 'Eph'],
           '11-philippians': ['Philippians', '11', 'Phil'],
           '12-colossians': ['Colossians', '12', 'Col'],
           '13-1thessalonians': ['I_Thessalonians', '13', '1Thess'],
           '14-2thessalonians': ['II_Thessalonians', '14', '2Thess'],
           '15-1timothy': ['I_Timothy', '15', '1Tim'],
           '16-2timothy': ['II_Timothy', '16', '2Tim'],
           '17-titus': ['Titus', '17', 'Titus'],
           '18-philemon': ['Philemon', '18', 'Phlm'],
           '19-hebrews': ['Hebrews', '19', 'Heb'],
           '20-james': ['James', '20', 'Jas'],
           '21-1peter': ['I_Peter', '21', '1Pet'],
           '22-2peter': ['II_Peter', '22', '2Pet'],
           '23-1john': ['I_John', '23', '1John'],
           '24-2john': ['II_John', '24', '2John'],
           '25-3john': ['III_John', '25', '3John'],
           '26-jude': ['Jude', '26', 'Jude'],
           '27-revelation': ['Revelation', '27', 'Rev']}


# ## Optional: export to Excel for investigation

# In[38]:


# test: sorting the data
import openpyxl
import pickle

#if True:
for bo in bo2book:
    '''
    load all data into a dataframe and
    process books in order (bookinfo is a list!)
    '''
    InputFile = os.path.join(PklDir, f'{bo}.pkl')
    print(f'\tloading {InputFile}...')
    pkl_file = open(InputFile, 'rb')
    df = pickle.load(pkl_file)
    pkl_file.close()
    df.to_excel(os.path.join(XlsxDir, f'{bo}.xlsx'), index=False)


# ### Step 2: Running the TF walker function
#
# API info: https://annotation.github.io/text-fabric/tf/convert/walker.html
#
# The logic of interpreting the data is included in the director function.

# In[62]:


TF = Fabric(locations=BaseDir, silent=False)
cv = CV(TF)
version = "0.3"

###############################################
#          Common helper functions            #
###############################################

# Function to prevent errors during conversion due to missing data
def sanitize(input):
    if isinstance(input, float): return ''
    if isinstance(input, type(None)): return ''
    else: return (input)

# Function to expand the syntactic categories of words or wordgroups.
# See also "MACULA Greek Treebank for the Nestle 1904 Greek New Testament.pdf"
# pages 5 & 6 (section 2.4 Syntactic Categories at Clause Level).
def ExpandRole(input):
    if input=="adv": return 'Adverbial'
    if input=="io": return 'Indirect Object'
    if input=="o": return 'Object'
    if input=="o2": return 'Second Object'
    if input=="s": return 'Subject'
    if input=="p": return 'Predicate'
    if input=="v": return 'Verbal'
    if input=="vc": return 'Verbal Copula'
    if input=='aux': return 'Auxiliary'
    return ''

# Function to expand the Part of Speech labels. See also the description in
# "MACULA Greek Treebank for the Nestle 1904 Greek New Testament.pdf" pages 6 & 7
# (section 2.2 Syntactic Categories at Word Level: Part of Speech Labels).
def ExpandSP(input):
    if input=='adj': return 'Adjective'
    if input=='conj': return 'Conjunction'
    if input=='det': return 'Determiner'
    if input=='intj': return 'Interjection'
    if input=='noun': return 'Noun'
    if input=='num': return 'Numeral'
    if input=='prep': return 'Preposition'
    if input=='ptcl': return 'Particle'
    if input=='pron': return 'Pronoun'
    if input=='verb': return 'Verb'
    return ''
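# The two expansion functions above are plain if-chains. An equivalent, more compact alternative is a dictionary lookup; the sketch below is only an illustration of that design choice and is not used by the director function further down. ExpandRoleAlt and ROLE_EXPANSIONS are hypothetical names introduced here.

###############################################
# Sketch: dictionary-based alternative to the #
# if-chains in ExpandRole (and ExpandSP)      #
###############################################

ROLE_EXPANSIONS = {
    'adv': 'Adverbial', 'io': 'Indirect Object', 'o': 'Object', 'o2': 'Second Object',
    's': 'Subject', 'p': 'Predicate', 'v': 'Verbal', 'vc': 'Verbal Copula', 'aux': 'Auxiliary',
}

def ExpandRoleAlt(value):
    # unknown or empty values map to '' just like the original function
    return ROLE_EXPANSIONS.get(value, '')

# quick self-check against the original implementation
assert ExpandRoleAlt('io') == ExpandRole('io')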
###############################################
#            The director routine             #
###############################################

def director(cv):

    ###############################################
    #       Initial setup of data etc.            #
    ###############################################

    NoneType = type(None)   # needed as a tool to validate certain data
    IndexDict = {}          # init an empty dictionary
    WordGroupDict = {}      # init a dummy dictionary
    PrevWordGroupSet = WordGroupSet = []
    PrevWordGroupList = WordGroupList = []
    RootWordGroup = 0
    WordNumber = FoundWords = WordGroupTrack = 0
    # The following is required to recover successfully from an abnormal condition
    # in the LowFat tree data where a parent element carries no wordgroup number.
    # This number is arbitrary but should be high enough not to clash with 'real' WG numbers.
    DummyWGN = 200000

    for bo, bookinfo in bo2book.items():

        ###############################################
        #   start of section executed for each book   #
        ###############################################

        # note: bookinfo is a list! Split the data
        Book = bookinfo[0]
        BookNumber = int(bookinfo[1])
        BookShort = bookinfo[2]
        BookLoc = os.path.join(PklDir, f'{bo}.pkl')

        # load the data for this book into a dataframe
        # and make sure the word order is correct
        print(f'\tWe are loading {BookLoc}...')
        pkl_file = open(BookLoc, 'rb')
        df_unsorted = pickle.load(pkl_file)
        pkl_file.close()

        '''
        Fill the dictionary of column names for this book and
        sort to ensure proper word order.
        '''
        ItemsInRow = 1
        for itemname in df_unsorted.columns.to_list():
            IndexDict.update({'i_{}'.format(itemname): ItemsInRow})
            # This is to identify the column containing the key to sort upon
            if itemname == "{http://www.w3.org/XML/1998/namespace}id":
                SortKey = ItemsInRow-1
            ItemsInRow += 1
        df = df_unsorted.sort_values(by=df_unsorted.columns[SortKey])
        del df_unsorted

        # Set up nodes for the new book
        ThisBookPointer = cv.node('book')
        cv.feature(ThisBookPointer, book=Book, booknumber=BookNumber, bookshort=BookShort)

        ThisChapterPointer = cv.node('chapter')
        cv.feature(ThisChapterPointer, chapter=1)
        PreviousChapter = 1

        ThisVersePointer = cv.node('verse')
        cv.feature(ThisVersePointer, verse=1)
        PreviousVerse = 1

        ThisSentencePointer = cv.node('sentence')
        cv.feature(ThisSentencePointer, sentence=1)
        PreviousSentence = 1

        ###############################################
        # Iterate through words and construct objects #
        ###############################################

        for row in df.itertuples():
            WordNumber += 1
            FoundWords += 1

            # Detect and act upon changes in sentence, verse and chapter.
            # The order of terminating and creating the nodes is critical:
            # close verse - close chapter - open chapter - open verse
            NumberOfParents = sanitize(row[IndexDict.get("i_parents")])
            ThisSentence = int(row[IndexDict.get("i_Parent{}SN".format(NumberOfParents-1))])
            ThisVerse = sanitize(row[IndexDict.get("i_verse")])
            ThisChapter = sanitize(row[IndexDict.get("i_chapter")])
            if (ThisSentence != PreviousSentence):
                cv.terminate(ThisSentencePointer)
            if (ThisVerse != PreviousVerse):
                cv.terminate(ThisVersePointer)
            if (ThisChapter != PreviousChapter):
                cv.terminate(ThisChapterPointer)
                PreviousChapter = ThisChapter
                ThisChapterPointer = cv.node('chapter')
                cv.feature(ThisChapterPointer, chapter=ThisChapter)
            if (ThisVerse != PreviousVerse):
                PreviousVerse = ThisVerse
                ThisVersePointer = cv.node('verse')
                cv.feature(ThisVersePointer, verse=ThisVerse, chapter=ThisChapter)
            if (ThisSentence != PreviousSentence):
                PreviousSentence = ThisSentence
                ThisSentencePointer = cv.node('sentence')
                cv.feature(ThisSentencePointer, verse=ThisVerse, chapter=ThisChapter)

            ###############################################
            #    analyze and process the wordgroup tags   #
            ###############################################

            PrevWordGroupList = WordGroupList
            WordGroupList = []   # stores the currently active WordGroup numbers

            for i in range(NumberOfParents-2, 0, -1):   # important: reversed iteration!
                _WGN = row[IndexDict.get("i_Parent{}WGN".format(i))]
                if isinstance(_WGN, type(None)):
                    # handles conditions where the XML data carries no WGN (e.g. Acts 26:12);
                    # to recover, we create a dummy WG with a WGN high enough never to match any real WGN
                    WGN = DummyWGN
                else:
                    WGN = int(_WGN)
                if WGN != '':
                    WordGroupList.append(WGN)
                    WordGroupDict[(WGN, 0)] = WGN
                    WGclass = sanitize(row[IndexDict.get("i_Parent{}Class".format(i))])
                    WGrule = sanitize(row[IndexDict.get("i_Parent{}Rule".format(i))])
                    WGtype = sanitize(row[IndexDict.get("i_Parent{}Type".format(i))])
                    if WGclass == WGrule == WGtype == '':
                        WGclass = 'to be skipped?'
                    if WGrule[-2:] == 'CL' and WGclass == '':
                        WGclass = 'cl*'   # to simulate the way Logos presents this condition
                    WordGroupDict[(WGN, 6)] = WGclass
                    WordGroupDict[(WGN, 1)] = WGrule
                    WordGroupDict[(WGN, 8)] = WGtype
                    WordGroupDict[(WGN, 3)] = sanitize(row[IndexDict.get("i_Parent{}Junction".format(i))])
                    WordGroupDict[(WGN, 2)] = sanitize(row[IndexDict.get("i_Parent{}Cltype".format(i))])
                    WordGroupDict[(WGN, 7)] = sanitize(row[IndexDict.get("i_Parent{}Role".format(i))])
                    WordGroupDict[(WGN, 9)] = sanitize(row[IndexDict.get("i_Parent{}Appos".format(i))])
                    WordGroupDict[(WGN, 10)] = NumberOfParents-1-i   # = number of parent wordgroups

            if not PrevWordGroupList == WordGroupList:
                if RootWordGroup != WordGroupList[0]:
                    RootWordGroup = WordGroupList[0]
                    SuspendableWordGroupList = []
                    # we have a new sentence: rebuild the suspendable wordgroup list
                    # some cleaning of data may be added here to save on memory...
                    # for k in range(6): del WordGroupDict[item,k]
                for item in reversed(PrevWordGroupList):
                    if (item not in WordGroupList):
                        # CLOSE/SUSPEND CASE
                        SuspendableWordGroupList.append(item)
                        cv.terminate(WordGroupDict[item, 4])
                for item in WordGroupList:
                    if (item not in PrevWordGroupList):
                        if (item in SuspendableWordGroupList):
                            # RESUME CASE
                            # print('\n resume: '+str(item), end=' ')
                            cv.resume(WordGroupDict[(item, 4)])
                        else:
                            # CREATE CASE
                            # print('\n create: '+str(item), end=' ')
                            WordGroupDict[(item, 4)] = cv.node('wg')
                            WordGroupDict[(item, 5)] = WordGroupTrack
                            WordGroupTrack += 1
                        cv.feature(WordGroupDict[(item, 4)],
                                   wgnum=WordGroupDict[(item, 0)], junction=WordGroupDict[(item, 3)],
                                   clausetype=WordGroupDict[(item, 2)], rule=WordGroupDict[(item, 1)],
                                   wgclass=WordGroupDict[(item, 6)], wgrole=WordGroupDict[(item, 7)],
                                   wgrolelong=ExpandRole(WordGroupDict[(item, 7)]), wgtype=WordGroupDict[(item, 8)],
                                   appos=WordGroupDict[(item, 9)], wglevel=WordGroupDict[(item, 10)])

            # These roles are performed either by a WG or by just a single word.
            Role = row[IndexDict.get("i_role")]
            ValidRoles = ["adv", "io", "o", "o2", "s", "p", "v", "vc", "aux"]
            DistanceToRoleClause = 0
            if isinstance(Role, str) and Role in ValidRoles:
                # Role is assigned to this word (uniquely)
                WordRole = Role
                WordRoleLong = ExpandRole(WordRole)
            else:
                # Role details need to be taken from some up-tree wordgroup
                WordRole = WordRoleLong = ''
                for item in range(1, NumberOfParents-1):
                    Role = sanitize(row[IndexDict.get("i_Parent{}Role".format(item))])
                    if isinstance(Role, str) and Role in ValidRoles:
                        WordRole = Role
                        WordRoleLong = ExpandRole(WordRole)
                        DistanceToRoleClause = item
                        break

            # Find the number of the WG containing the clause definition
            for item in range(1, NumberOfParents-1):
                WGrule = sanitize(row[IndexDict.get("i_Parent{}Rule".format(item))])
                if row[IndexDict.get("i_Parent{}Class".format(item))] == 'cl' or WGrule[-2:] == 'CL':
                    ContainedClause = sanitize(row[IndexDict.get("i_Parent{}WGN".format(item))])
                    break

            ###############################################
            #      analyze and process the word tags      #
            ###############################################

            # Determine the syntactic categories at word level.
            PartOfSpeech = sanitize(row[IndexDict.get("i_class")])
            PartOfSpeechFull = ExpandSP(PartOfSpeech)

            # The following part of the code reproduces the features 'word' and 'after', which
            # currently contain incorrect data in a few specific cases.
            # See https://github.com/tonyjurg/Nestle1904LFT/blob/main/resources/identifying_odd_afters.ipynb

            # Get the word details and detect the presence of punctuation
            word = sanitize(row[IndexDict.get("i_unicode")])
            match = re.search(r"([\.·—,;])$", word)
            if match:
                # The group(0) method is used to retrieve the matched punctuation sign
                after = match.group(0)+' '
                # Remove the punctuation from the end of the word
                word = word[:-1]
            else:
                after = ' '
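            # Worked example of the split above: a token like 'λόγος,' matches the pattern,
            # so the trailing comma moves into 'after' (word='λόγος', after=', '),
            # while a token without trailing punctuation keeps after=' '.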
            # Some attributes are not present in some (small) books. The following is to prevent exceptions.
            degree = ''
            if 'i_degree' in IndexDict:
                degree = sanitize(row[IndexDict.get("i_degree")])
            subjref = ''
            if 'i_subjref' in IndexDict:
                subjref = sanitize(row[IndexDict.get("i_subjref")])

            # Create the word slots
            this_word = cv.slot()
            cv.feature(this_word,
                       after=after,
                       unicode=sanitize(row[IndexDict.get("i_unicode")]),
                       word=word,
                       monad=sanitize(row[IndexDict.get("i_monad")]),
                       orig_order=FoundWords,
                       book_long=sanitize(row[IndexDict.get("i_book_long")]),
                       booknumber=BookNumber,
                       bookshort=sanitize(row[IndexDict.get("i_book_short")]),
                       chapter=ThisChapter,
                       ref=sanitize(row[IndexDict.get("i_ref")]),
                       sp=PartOfSpeech,
                       sp_full=PartOfSpeechFull,
                       verse=ThisVerse,
                       sentence=ThisSentence,
                       normalized=sanitize(row[IndexDict.get("i_normalized")]),
                       morph=sanitize(row[IndexDict.get("i_morph")]),
                       strongs=sanitize(row[IndexDict.get("i_strong")]),
                       lex_dom=sanitize(row[IndexDict.get("i_domain")]),
                       ln=sanitize(row[IndexDict.get("i_ln")]),
                       gloss=sanitize(row[IndexDict.get("i_gloss")]),
                       gn=sanitize(row[IndexDict.get("i_gender")]),
                       nu=sanitize(row[IndexDict.get("i_number")]),
                       case=sanitize(row[IndexDict.get("i_case")]),
                       lemma=sanitize(row[IndexDict.get("i_lemma")]),
                       person=sanitize(row[IndexDict.get("i_person")]),
                       mood=sanitize(row[IndexDict.get("i_mood")]),
                       tense=sanitize(row[IndexDict.get("i_tense")]),
                       number=sanitize(row[IndexDict.get("i_number")]),
                       voice=sanitize(row[IndexDict.get("i_voice")]),
                       degree=degree,
                       type=sanitize(row[IndexDict.get("i_type")]),
                       reference=sanitize(row[IndexDict.get("i_ref")]),
                       subj_ref=subjref,
                       nodeID=sanitize(row[4]),   # this is a fixed position in the dataframe
                       wordrole=WordRole,
                       wordrolelong=WordRoleLong,
                       wordlevel=NumberOfParents-1,
                       roleclausedistance=DistanceToRoleClause,
                       containedclause=ContainedClause)
            cv.terminate(this_word)

        '''
        Wrap up the book. At the end of the book we need to close all nodes in the proper order.
        '''
        # close all open WordGroup nodes
        for item in WordGroupList:
            # cv.feature(WordGroupDict[(item,4)], add some stats?)
            cv.terminate(WordGroupDict[item, 4])

        cv.terminate(ThisSentencePointer)
        cv.terminate(ThisVersePointer)
        cv.terminate(ThisChapterPointer)
        cv.terminate(ThisBookPointer)

        # clear the dataframe for this book and clear the index dictionary
        del df
        IndexDict.clear()
        gc.collect()

        ###############################################
        #    end of section executed for each book    #
        ###############################################

    ###############################################
    #          end of director function           #
    ###############################################
###############################################
#             Output definitions              #
###############################################

slotType = 'word'

otext = {   # dictionary of config data for sections and text formats
    'fmt:text-orig-full': '{word}{after}',
    'sectionTypes': 'book,chapter,verse',
    'sectionFeatures': 'book,chapter,verse',
    'structureFeatures': 'book,chapter,verse',
    'structureTypes': 'book,chapter,verse',
}

# configure metadata
generic = {   # dictionary of metadata meant for all features
    'Name': 'Greek New Testament (Nestle 1904 based on Low Fat Tree)',
    'Editors': 'Eberhard Nestle',
    'Data source': 'MACULA Greek Linguistic Datasets, available at https://github.com/Clear-Bible/macula-greek/tree/main/Nestle1904/lowfat',
    'Availability': 'Creative Commons Attribution 4.0 International (CC BY 4.0)',
    'Converter_author': 'Tony Jurg, ReMa Student Vrije Universiteit Amsterdam, Netherlands',
    'Converter_execution': 'Tony Jurg, ReMa Student Vrije Universiteit Amsterdam, Netherlands',
    'Convertor_source': 'https://github.com/tonyjurg/Nestle1904LFT/tree/main/tools',
    'Converter_version': '{}'.format(version),
    'TextFabric version': '{}'.format(VERSION)   # imported from tf.parameters
}

# set of integer-valued feature names
intFeatures = {
    'booknumber',
    'chapter',
    'verse',
    'sentence',
    'wgnum',
    'orig_order',
    'monad',
    'wglevel'
}

# per-feature dicts with metadata
featureMeta = {
    'after': {'description': 'Characters (e.g. punctuation) following the word'},
    'book': {'description': 'Book'},
    'book_long': {'description': 'Book name (fully spelled out)'},
    'booknumber': {'description': 'NT book number (Matthew=1, Mark=2, ..., Revelation=27)'},
    'bookshort': {'description': 'Book name (abbreviated)'},
    'chapter': {'description': 'Chapter number inside book'},
    'verse': {'description': 'Verse number inside chapter'},
    'sentence': {'description': 'Sentence number (counted per chapter)'},
    'type': {'description': 'Wordgroup type information (verb, verbless, elided, minor, etc.)'},
    'rule': {'description': 'Wordgroup rule information'},
    'orig_order': {'description': 'Word order within the corpus (per book)'},
    'monad': {'description': 'Monad (currently: order of words in the XML tree file!)'},
    'word': {'description': 'Word as it appears in the text (excl. punctuation)'},
    'unicode': {'description': 'Word as it appears in the text in Unicode (incl. punctuation)'},
    'ref': {'description': 'ref ID'},
    'sp': {'description': 'Part of Speech (abbreviated)'},
    'sp_full': {'description': 'Part of Speech (long description)'},
    'normalized': {'description': 'Surface word stripped of punctuation'},
    'lemma': {'description': 'Lexeme (lemma)'},
    'morph': {'description': 'Morphological tag (Sandborg-Petersen morphology)'},
    # see also the discussion on the relation between lex_dom and ln
    # @ https://github.com/Clear-Bible/macula-greek/issues/29
    'lex_dom': {'description': 'Lexical domain according to the Semantic Dictionary of Biblical Greek, SDBG (not present everywhere?)'},
    'ln': {'description': 'Louw-Nida lexical classification (not present everywhere?)'},
    'strongs': {'description': 'Strongs number'},
    'gloss': {'description': 'English gloss'},
    'gn': {'description': 'Grammatical gender (Masculine, Feminine, Neuter)'},
    'nu': {'description': 'Grammatical number (Singular, Plural)'},
    'case': {'description': 'Grammatical case (Nominative, Genitive, Dative, Accusative, Vocative)'},
    'person': {'description': 'Grammatical person of the verb (first, second, third)'},
    'mood': {'description': 'Grammatical mood of the verb (passive, etc.)'},
    'tense': {'description': 'Grammatical tense of the verb (e.g. Present, Aorist)'},
    'number': {'description': 'Grammatical number of the verb'},
    'voice': {'description': 'Grammatical voice of the verb'},
    'degree': {'description': 'Degree (e.g. Comparative, Superlative)'},
    'type': {'description': 'Grammatical type of noun or pronoun (e.g. Common, Personal)'},
    'reference': {'description': 'Reference (to nodeID in the XML source data, not yet post-processed)'},
    'subj_ref': {'description': 'Subject reference (to nodeID in the XML source data, not yet post-processed)'},
    'nodeID': {'description': 'Node ID (as in the XML source data, not yet post-processed)'},
    'junction': {'description': 'Junction data related to a wordgroup'},
    'wgnum': {'description': 'Wordgroup number (counted per book)'},
    'wgclass': {'description': 'Class of the wordgroup'},
    'wgrole': {'description': 'Role of the wordgroup (abbreviated)'},
    'wgrolelong': {'description': 'Role of the wordgroup (full)'},
    'wordrole': {'description': 'Role of the word (abbreviated)'},
    'wordrolelong': {'description': 'Role of the word (full)'},
    'wgtype': {'description': 'Wordgroup type details'},
    'clausetype': {'description': 'Clause type details'},
    'appos': {'description': 'Apposition details'},
    'wglevel': {'description': 'Number of parent wordgroups for a wordgroup'},
    'wordlevel': {'description': 'Number of parent wordgroups for a word'},
    'roleclausedistance': {'description': 'Distance to the wordgroup defining the role of this word'},
    'containedclause': {'description': 'Contained clause (WG number)'}
}

###############################################
#              the main function              #
###############################################

good = cv.walk(
    director,
    slotType,
    otext=otext,
    generic=generic,
    intFeatures=intFeatures,
    featureMeta=featureMeta,
    warn=False,
    force=False
)

if good:
    print("done")
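# Before moving on to the test section, one can optionally verify that the conversion produced loadable Text-Fabric data by loading a few features directly from the output location. This sketch assumes the features were written under BaseDir by the Fabric instance above; adjust the location if your setup writes them elsewhere.

# In[ ]:


# Optional local check of the freshly created Text-Fabric data (assumed location: BaseDir)
TFcheck = Fabric(locations=BaseDir, silent=True)
apiCheck = TFcheck.load('book chapter verse word after')
if apiCheck:
    print(f'{apiCheck.F.otype.maxSlot} word slots loaded')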
# ## 4. Basic testing of the Text-Fabric data
# ##### [Back to TOC](#TOC)

# ### Step 1: load the TF data
#
# The TF data will be loaded from the GitHub repository https://github.com/tonyjurg/Nestle1904LFT

# In[13]:


get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')


# In[14]:


# First, I have to load the different modules that I use for analyzing the data and for plotting:
import sys, os, collections
import pandas as pd
import numpy as np
import re

from tf.fabric import Fabric
from tf.app import use


# The following cell loads the Text-Fabric files from the GitHub repository.

# In[63]:


# Loading-the-New-Testament-Text-Fabric
N1904 = use("tonyjurg/Nestle1904LFT:clone", version="0.3", hoist=globals())


# ## Basic testing of the Text-Fabric data
# ##### [Back to TOC](#TOC)
#
# ### Some basic display
#
# Note: the implementation of how phrases should be displayed (especially with regard to conjunctions) is still to be done.

# In[57]:


Search0 = '''
book book=Matthew
   chapter chapter=1
      verse verse=20
         wg1:wg wgclass=cl wglevel*
            a:word wordrole=v
            b:word wordrole=o
a .containedclause. b
'''
Search0 = N1904.search(Search0)
N1904.show(Search0, start=20, end=21, condensed=True, extraFeatures={'containedclause', 'wordrole', 'roleclausedistance'}, suppress={'chapter'}, withNodes=False)


# In[66]:


Search0 = '''
book book=John
   chapter chapter=1
      verse verse=1
'''
Search0 = N1904.search(Search0)
N1904.show(Search0, start=1, end=2, condensed=True, extraFeatures={'wordrole'}, suppress={'chapter', 'verse'}, colorMap={4: 'pink', 5: 'turquoise', 6: 'lightblue', 7: 'red'}, multiFeatures=False)


# ### Step 3: dump some structure information

# In[60]:


T.structureInfo()


# In[67]:


TF.features['otext'].metaData


# ### Running the Text-Fabric browser
#
# Note that the normal invocation would be:
#
# `!tf tonyjurg/Nestle1904LFT`

# In[11]:


get_ipython().system('tf app:\\text-fabric-data\\github\\tonyjurg\\Nestle1904LFT\\app data:\\text-fabric-data\\github\\tonyjurg\\Nestle1904LFT\\tf\\0.2')


# In[ ]:
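# A final optional sanity check, assuming the use() call above hoisted the standard Text-Fabric handles (F, L, T) into the global namespace: count the word slots per book.

# In[ ]:


# Optional: count word nodes per book (assumes F and L were hoisted by use() above)
for book_node in F.otype.s('book'):
    print(F.book.v(book_node), len(L.d(book_node, otype='word')))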