#!/usr/bin/env python
# coding: utf-8

# # Parallel Processing of Data
# This notebook will enable you to understand how to analyze data in parallel
# using the map function of MapReduce.
#
# Please note that the map function used in this notebook is not a real map.
# A real MapReduce framework like **_Hadoop_** or **_Spark_** requires some
# additional configuration and normally will not be applied to data that is
# so small. Therefore, you might find the runtime between different parallel
# processing notebooks does not vary too much.

# In[ ]:

import time
import math


def breakDoc(text, nToBreakInto):
    """Split *text* into *nToBreakInto* contiguous chunks.

    Each chunk holds ceil(len(text) / nToBreakInto) characters; the trailing
    chunk(s) may be shorter (or empty) when the length does not divide evenly.
    The chunks concatenate back to exactly *text*.

    Parameters
    ----------
    text : str
        The document to partition.
    nToBreakInto : int
        Number of chunks to produce; must be > 0 (0 raises ZeroDivisionError,
        matching the original behavior).

    Returns
    -------
    list[str]
        Exactly nToBreakInto slices of *text*, in order.
    """
    chunkSize = math.ceil(len(text) / nToBreakInto)
    # Python slices clamp at the end of the string, so text[start:end] is
    # already equivalent to text[start:] when end runs past len(text) —
    # no boundary special-casing is needed.
    return [text[i * chunkSize:(i + 1) * chunkSize] for i in range(nToBreakInto)]


def loadDocuments():
    """Prompt the user for a filename and return that file's full contents.

    Returns
    -------
    str
        The complete text of the named file.

    Raises
    ------
    OSError
        If the named file cannot be opened.
    """
    # NOTE(review): the prompt wording asks for "the Text" but the value is
    # actually used as a *filename* — consider rewording; left byte-identical
    # here to preserve observable behavior.
    filename = input('Please Enter the Text You Want to Encipher: ')
    with open(filename) as f:
        text = f.read()
    return text


def cipher(text, key):
    """Caesar-shift every ASCII letter in *text* forward by *key* positions.

    Non-letter characters (digits, punctuation, whitespace) pass through
    unchanged. Letters shifted past 'Z'/'z' wrap back to the start of the
    alphabet (assumes 0 <= key <= 26, as in the classic Caesar cipher).

    NOTE(review): the tail of this function was truncated in the source
    chunk; everything from the wrap-around branch onward is the conventional
    reconstruction — TODO confirm against the original file.

    Parameters
    ----------
    text : str
        The plaintext to encipher.
    key : int
        The shift amount.

    Returns
    -------
    str
        The enciphered text.
    """
    import string
    stri = ""
    for ch in text:
        if ch not in string.ascii_letters:
            # Copy non-letters verbatim.
            stri += ch
        else:
            output = chr(ord(ch) + key)
            outputNum = ord(output)
            # In-range results: 65-90 ('A'-'Z') or 97-122 ('a'-'z').
            if 64 < outputNum < 91 or 96 < outputNum < 123:
                stri += output
            else:
                # Shifted past the end of the alphabet: wrap around by 26.
                stri += chr(outputNum - 26)
    return stri