"""
(c) 2019 Healthcare/IO 1.0
Vanderbilt University Medical Center, Health Information Privacy Laboratory
https://hiplab.mc.vanderbilt.edu/healthcareio

Authors:
    Khanhly Nguyen,
    Steve L. Nyemba <steve.l.nyemba@vanderbilt.edu>
License:
    MIT, terms are available at https://opensource.org/licenses/MIT

This parser was originally written by Khanhly Nguyen for her internship. It is intended to
parse x12 835, 837 and other formats, provided the appropriate configuration.
USAGE:
    - COMMAND LINE
    - EMBEDDED
"""
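#
# Embedded usage: instantiate Parser with the path to a configuration file, hand it a list of
# 835/837 files via parser.set.files(...) and start it (Parser is a multiprocessing.Process).
# A minimal, hypothetical sketch of this is given at the bottom of this file.
#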
import hashlib
import json
import os
import sys
from itertools import islice
from multiprocessing import Process

import transport
from transport import providers
import jsonmerge
import copy


class void:
    pass


class Formatters:
    def __init__(self):
        # self.config = config
        self.get = void()
        self.get.config = self.get_config
        self.parse = void()
        self.parse.sv3 = self.sv3
        self.parse.sv2 = self.sv2
        self.sv2_parser = self.sv2
        self.sv3_parser = self.sv3
        self.sv3_parse = self.sv3
        self.format_proc = self.procedure
        self.format_diag = self.diagnosis
        self.parse.procedure = self.procedure
        self.parse.diagnosis = self.diagnosis
        self.parse.date = self.date
        self.format_date = self.date
        self.format_pos = self.pos
        self.format_time = self.time

    def split(self, row, sep='*', prefix='HI'):
        """
        This function splits an x12 row into its elements, handling composite ('>') separators
        """
        value = []
        if row.startswith(prefix) is False:
            for row_value in row.replace('~', '').split(sep):
                if '>' in row_value and not row_value.startswith('HC'):
                    # if row_value.startswith('HC') or row_value.startswith('AD'):
                    if row_value.startswith('AD'):
                        value += row_value.split('>')[:2]
                    else:
                        value += [row_value]
                        # value += row_value.split('>') if row.startswith('CLM') is False else [row_value]
                else:
                    value.append(row_value.replace('\n', ''))
            value = [xchar.replace('\r', '') for xchar in value]  # row.replace('~','').split(sep)
        else:
            value = [[prefix] + self.split(item, '>') for item in row.replace('~', '').split(sep)[1:]]
        return value if type(value) == list and type(value[0]) != list else value[0]

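    # Illustrative behavior (derived from the logic above; the segment content is made up):
    #
    #   Formatters().split("NM1*QC*1*DOE*JOHN~")  ->  ['NM1', 'QC', '1', 'DOE', 'JOHN']
    #
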
    def get_config(self, config, row):
        """
        This function returns the meaningful parts of the configuration for a given item
        """
        _row = list(row) if type(row[0]) == str else list(row[0])
        _info = config[_row[0]] if _row[0] in config else {}
        _rinfo = {}
        key = None
        if '@ref' in _info:
            keys = list(set(_row) & set(_info['@ref'].keys()))
            if keys:
                _rinfo = {}
                for key in keys:
                    _rinfo = jsonmerge.merge(_rinfo, _info['@ref'][key])
                return _rinfo
            else:
                return {}
        if not _info and 'SIMILAR' in config:
            #
            # Let's look for the nearest key using the edit distance
            #
            if _row[0] in config['SIMILAR']:
                key = config['SIMILAR'][_row[0]]
                _info = config[key]
        return _info

    def hash(self, value):
        salt = os.environ['HEALTHCAREIO_SALT'] if 'HEALTHCAREIO_SALT' in os.environ else ''
        _value = str(value) + salt
        if sys.version_info[0] > 2:
            return hashlib.md5(_value.encode('utf-8')).hexdigest()
        else:
            return hashlib.md5(_value).hexdigest()

    def suppress(self, value):
        return 'N/A'

    def date(self, value):
        value = value if type(value) != list else "-".join(value)
        if len(value) > 8 or '-' in value:
            #
            # This is the case of a thru date i.e the first part should be provided in a 435 entry
            #
            fdate = "-".join([value[:8][:4], value[:8][4:6], value[:8][6:8]])
            tdate = "-".join([value[9:][:4], value[9:][4:6], value[9:][6:8]])
            return {"from": fdate, "to": tdate}
        if len(value) == 8:
            # CCYYMMDD
            year = value[:4]
            month = value[4:6]
            day = value[6:]
            return "-".join([year, month, day])[:10]  # {"year":year,"month":month,"day":day}
        elif len(value) == 6:
            # YYMMDD
            year = '20' + value[:2]
            month = value[2:4]
            day = value[4:]
        elif value.isnumeric() and len(value) >= 10:
            #
            # Here we will assume we have a numeric value
            #
            year = value[:4]
            month = value[4:6]
            day = value[6:8]
        else:
            #
            # We have a date formatting issue
            #
            return value
        return "-".join([year, month, day])

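    # Illustrative behavior (derived from the branches above; the values are made up):
    #
    #   date('20190301')           ->  '2019-03-01'
    #   date('190301')             ->  '2019-03-01'
    #   date('20190301-20190315')  ->  {'from': '2019-03-01', 'to': '2019-03-15'}
    #
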
    def time(self, value):
        pass

    def sv3(self, value):
        if '>' in value[1]:
            terms = value[1].split('>')
            return {'type': terms[0], 'code': terms[1], "amount": float(value[2])}
        else:
            return {"code": value[2], "type": value[1], "amount": float(value[3])}

    def sv2(self, value):
        #
        # @TODO: Sometimes there's a suffix (need to inventory all the variations)
        #
        if '>' in value or ':' in value:
            xchar = '>' if '>' in value else ':'
            _values = value.split(xchar)
            modifier = {}
            if len(_values) > 2:
                modifier = {"code": _values[2]}
                if len(_values) > 3:
                    modifier['type'] = _values[3]
            _value = {"code": _values[1], "type": _values[0]}
            if modifier:
                _value['modifier'] = modifier
            return _value
        else:
            return value

    def procedure(self, value):
        for xchar in [':', '<', '|', '>']:
            if xchar in value and len(value.split(xchar)) > 1:
                # _value = {"type":value.split(':')[0].strip(),"code":value.split(':')[1].strip()}
                _value = {"type": value.split(xchar)[0].strip(), "code": value.split(xchar)[1].strip()}
                if len(value.split(xchar)) > 2:
                    index = 1
                    for modifier in value.split(xchar)[2:]:
                        _value['modifier_' + str(index)] = modifier
                        index += 1
                break
            else:
                _value = str(value)
        return _value

    def diagnosis(self, value):
        return [{"code": item[2], "type": item[1]} for item in value if len(item) > 1]

    def parse_loc(self, value):
        if ':' in value:
            return dict(zip(['place_of_service', 'claim_indicator', 'claim_frequency'], value.split(':')))

    def pos(self, value):
        """
        Formatting place of service information within a segment (REF)
        @TODO: In order to accommodate the other elements they need to be specified in the configuration,
               otherwise it causes problems on export
        """
        xchar = '>' if '>' in value else ':'
        x = value.split(xchar)
        x = {"place_of_service": x[0], "indicator": x[1], "frequency": x[2]} if len(x) == 3 else {"place_of_service": x[0], "indicator": None, "frequency": None}
        return x


class Parser(Process):
    @staticmethod
    def setup(path):
        config = json.loads(open(path).read())
        _config = config['parser']
        #
        # The parser configuration may need some editing; allowing the '837'/'835' sections to be
        # provided as separate files eases development and the use of alternate configurations
        #
        if type(_config['837']) == str or type(_config['835']) == str:
            for _id in ['837', '835']:
                if type(_config[_id]) == str and os.path.exists(_config[_id]):
                    _config[_id] = json.loads(open(_config[_id]).read())
                if type(_config[_id]) == dict:
                    _config[_id] = [_config[_id]]
        config['parser'] = _config
        return config

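    # Illustrative shape of the configuration consumed here and in Parser.init below.
    # This is an assumption inferred from how the keys are read in this module, not an
    # authoritative schema:
    #
    #   {
    #     "parser": {"837": <path or dict>, "835": <path or dict>},  # normalized to a list of dicts
    #     "store":  {...},        # arguments handed to transport.factory.instance(...)
    #     "out-folder": "...",    # used by Parser.init to create the output folder
    #     "schema": {...}         # table creation statements used by Parser.init for SQL stores
    #   }
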
    @staticmethod
    def init(**_args):
        """
        This function initializes the database that will store the claims, if need be
        :path   configuration file
        """
        PATH = os.sep.join([os.environ['HOME'], '.healthcareio'])
        filename = os.sep.join([PATH, 'config.json'])
        filename = _args['path'] if 'path' in _args else filename
        info = None
        if os.path.exists(filename):
            #
            # Loading the configuration file (JSON format)
            #
            file = open(filename)
            info = json.loads(file.read())
            OUTPUT_FOLDER = info['out-folder']
            if 'output-folder' not in info and not os.path.exists(OUTPUT_FOLDER):
                os.mkdir(OUTPUT_FOLDER)
            elif 'output-folder' in info and not os.path.exists(info['out-folder']):
                os.mkdir(info['out-folder'])

            lwriter = None
            IS_SQL = False
            if 'type' in info['store'] and info['store']['type'] == 'disk.SQLiteWriter':
                lwriter = transport.factory.instance(**info['store'])
                IS_SQL = True
            elif 'provider' in info['store'] and info['store']['provider'] == 'sqlite':
                lwriter = transport.instance(**info['store'])
                # any of these SQL providers is handled the same way (the list is merely truthy)
                IS_SQL = [providers.SQLITE, providers.POSTGRESQL, providers.NETEZZA, providers.MYSQL, providers.MARIADB]
            if lwriter and IS_SQL:
                for key in info['schema']:
                    if key != 'logs':
                        _id = 'claims' if key == '837' else 'remits'
                    else:
                        _id = key
                    if not lwriter.has(table=_id):
                        lwriter.apply(info['schema'][key]['create'])
                lwriter.close()
        return info

    def __init__(self, path):
        """
        :path   path of the configuration file (it can be absolute)
        """
        Process.__init__(self)
        self.utils = Formatters()
        self.get = void()
        self.get.value = self.get_map
        self.get.default_value = self.get_default_value
        self._custom_config = self.get_custom(path)
        #
        # The configuration is loaded and normalized by Parser.setup (the '837'/'835' sections
        # may be provided as separate files and are turned into lists of dicts)
        #
        _config = Parser.setup(path)
        self.config = _config['parser']
        self.store = _config['store']
        self.cache = {}
        self.files = []
        self.set = void()
        self.set.files = self.set_files
        self.emit = void()
        self.emit.pre = None
        self.emit.post = None

    def get_custom(self, path):
        """
        :path   path of the configuration file (it can be absolute)
        """
        #
        # A custom configuration, if any, is expected in a 'custom' folder next to the configuration file
        #
        _path = path.replace('config.json', '')
        if _path.endswith(os.sep):
            _path = _path[:-1]
        _config = {}
        _path = os.sep.join([_path, 'custom'])
        if os.path.exists(_path):
            files = os.listdir(_path)
            if files:
                fullname = os.sep.join([_path, files[0]])
                _config = json.loads(open(fullname).read())
        return _config

    def set_files(self, files):
        self.files = files

    def get_map(self, row, config, version=None):
        # label = config['label'] if 'label' in config else None
        handler = Formatters()
        if 'map' not in config and hasattr(handler, config['apply']):
            pointer = getattr(handler, config['apply'])
            object_value = pointer(row)
            return object_value
        #
        # Pull the goto configuration that skips rows
        #
        omap = config['map'] if not version or version not in config else config[version]
        anchors = config['anchors'] if 'anchors' in config else []
        rewrite = config['rewrite'] if 'rewrite' in config else {}
        if len(row) == 2 and row[0] == 'HI':
            row = ([row[0]] + row[1].split(':'))
        if type(row[0]) == str:
            object_value = {}
            for key in omap:
                index = omap[key]
                if anchors and set(anchors) & set(row):
                    _key = list(set(anchors) & set(row))[0]
                    aindex = row.index(_key)
                    index = aindex + index
                if index < len(row):
                    value = row[index]
                    if 'cast' in config and key in config['cast'] and value.strip() != '':
                        if config['cast'][key] in ['float', 'int']:
                            try:
                                value = float(value) if config['cast'][key] == 'float' else int(value)
                            except Exception as e:
                                #
                                # Casting can fail when the anchor is missing;
                                # this is typical, and the hardened functions (SV2, SV3) help circumvent it
                                #
                                pass
                        elif hasattr(handler, config['cast'][key]):
                            pointer = getattr(handler, config['cast'][key])
                            value = pointer(value)
                        else:
                            print("Missing Pointer ", key, config['cast'])
                    if type(value) == dict:
                        for objkey in value:
                            if type(value[objkey]) == dict:
                                continue
                            if 'syn' in config and value[objkey] in config['syn']:
                                # value[objkey] = config['syn'][ value[objkey]]
                                pass
                        if key in rewrite:
                            _key = rewrite[key]
                            if _key in value:
                                value = value[_key]
                            else:
                                value = ""
                        value = {key: value} if key not in value else value
                    else:
                        if 'syn' in config and value in config['syn']:
                            # value = config['syn'][value]
                            pass
                    if type(value) == dict:
                        object_value = jsonmerge.merge(object_value, value)
                    else:
                        object_value[key] = value
        else:
            #
            # we are dealing with a complex object
            #
            object_value = []
            for row_item in row:
                value = self.get.value(row_item, config, version)
                object_value.append(value)
        return object_value

    def set_cache(self, tmp, _info):
        """
        Insert a value into the cache; these values are referenced later within a loop
        """
        if 'cache' in _info:
            key = _info['cache']['key']
            value = _info['cache']['value']
            field = _info['cache']['field']
            if value in tmp:
                self.cache[key] = {field: tmp[value]}

    def get_cache(self, row):
        """
        Retrieve the cached element, if any, for the current row
        """
        key = row[0]
        return self.cache[key] if key in self.cache else {}

    def apply(self, content, _code):
        """
        :content    content of a file i.e a segment with the envelope
        :_code      837 or 835 (helps get the appropriate configuration)
        """
        util = Formatters()
        value = {}
        for row in content[:]:
            row = util.split(row.replace('\n', '').replace('~', ''))
            _info = util.get.config(self.config[_code][0], row)
            if self._custom_config and _code in self._custom_config:
                _cinfo = util.get.config(self._custom_config[_code], row)
            else:
                _cinfo = {}
            if _info or _cinfo:
                try:
                    _info = jsonmerge.merge(_info, _cinfo)
                    tmp = self.get.value(row, _info)
                    if not tmp:
                        continue
                    #
                    # At this point we have the configuration and the row parsed into values
                    # We should check to see if we don't have anything in the cache to be added to it
                    #
                    if row[0] in self.cache:
                        tmp = jsonmerge.merge(tmp, self.get_cache(row))

                    if 'label' in _info:
                        label = _info['label']
                        if type(tmp) == list:
                            value[label] = tmp if label not in value else value[label] + tmp
                        else:
                            if label not in value:
                                value[label] = []
                            value[label].append(tmp)
                            if '_index' not in tmp:
                                #
                                # In case we asked it to be overridden, then this will not apply
                                # X12 occasionally requires references to other elements in a loop (alas)
                                #
                                tmp['_index'] = len(value[label]) - 1
                    elif 'field' in _info:
                        name = _info['field']
                        if name not in value:
                            value = dict(value, **{name: tmp})
                        else:
                            value[name] = dict(value[name], **tmp)
                    else:
                        value = dict(value, **tmp)
                except Exception as e:
                    print(e.args[0])
                    pass
                #
                # At this point the object is completely built,
                # if there are any attributes to be cached it will be done here
                #
                if 'cache' in _info:
                    self.set_cache(tmp, _info)
        return value if value else {}

    def get_default_value(self, content, _code):
        util = Formatters()
        TOP_ROW = content[1].split('*')
        SUBMITTED_DATE = util.parse.date(TOP_ROW[4])
        CATEGORY = content[2].split('*')[1].strip()
        VERSION = content[1].split('*')[-1].replace('~', '').replace('\n', '')
        SENDER_ID = TOP_ROW[2]
        row = util.split(content[3])
        _info = util.get_config(self.config[_code][0], row)
        value = self.get.value(row, _info, VERSION) if _info else {}
        value['category'] = {"setid": _code, "version": 'X' + VERSION.split('X')[1], "id": VERSION.split('X')[0].strip()}
        value["submitted"] = SUBMITTED_DATE
        value['sender_id'] = SENDER_ID
        # value = dict(value,**self.apply(content,_code))
        value = jsonmerge.merge(value, self.apply(content, _code))
        return value

    def read(self, filename):
        """
        :formerly get_content
        This function returns the content of the EDI file parsed given the configuration specified.
        It is capable of identifying the file type (835 or 837) given the content.
        :section    loop prefix (HL, CLP)
        :config     configuration with formatting rules, labels ...
        :filename   location of the file
        """
        logs = []
        claims = []
        _code = 'UNKNOWN'
        try:
            self.cache = {}
            file = open(filename.strip())
            file = file.read().split('CLP')
            _code = '835'
            section = 'CLP'
            if len(file) == 1:
                file = file[0].split('CLM')  # .split('HL')
                _code = '837'
                section = 'CLM'  # 'HL'
            INITIAL_ROWS = file[0].split(section)[0].split('\n')
            if len(INITIAL_ROWS) == 1:
                INITIAL_ROWS = INITIAL_ROWS[0].split('~')
            DEFAULT_VALUE = self.get.default_value(INITIAL_ROWS, _code)
            DEFAULT_VALUE['name'] = filename.strip()
            file = section.join(file).split('\n')
            if len(file) == 1:
                file = file[0].split('~')
            #
            # In the initial rows, there's redundant information (so much for the x12 standard)
            # index 1 identifies the file type i.e CLM for a claim and CLP for a remittance
            #
            segment = []
            index = 0
            _toprows = []
            _default = None
            for row in file:
                row = row.replace('\r', '')
                if row.startswith(section) and not segment:
                    segment = [row]
                    continue
                elif segment and not row.startswith(section):
                    segment.append(row)

                if len(segment) > 1 and row.startswith(section):
                    #
                    # process the segment somewhere (create a thread maybe?)
                    #
                    _claim = self.apply(segment, _code)
                    if _claim:
                        _claim['index'] = index  # len(claims)
                        #
                        # Merge the claim into the default values; list attributes are appended,
                        # everything else is overwritten
                        #
                        schema = {"properties": {}}
                        for attr in _claim.keys():
                            schema['properties'][attr] = {"mergeStrategy": "append" if type(_claim[attr]) == list else "overwrite"}
                        merger = jsonmerge.Merger(schema)
                        _baseclaim = None
                        _baseclaim = merger.merge(_baseclaim, copy.deepcopy(DEFAULT_VALUE))
                        _claim = merger.merge(_baseclaim, _claim)
                        claims.append(_claim)
                    segment = [row]
                    index += 1
            #
            # Handling the last claim found
            # @TODO: Fix merger related to schema (drops certain fields ... NOT cool)
            #
            if segment and segment[0].startswith(section):
                claim = self.apply(segment, _code)
                if claim:
                    claim['index'] = len(claims)
                    schema = {"properties": {}}
                    for attr in claim.keys():
                        schema['properties'][attr] = {"mergeStrategy": "append" if type(claim[attr]) == list else "overwrite"}
                    merger = jsonmerge.Merger(schema)
                    _baseclaim = None
                    _baseclaim = merger.merge(_baseclaim, copy.deepcopy(DEFAULT_VALUE))
                    claim = merger.merge(_baseclaim, claim)
                    claims.append(claim)
            if type(file) != list:
                file.close()
        except Exception as e:
            logs.append({"parse": _code, "completed": False, "name": filename, "msg": e.args[0]})
            return [], logs, None
        rate = 0 if len(claims) == 0 else (1 + index) / len(claims)
        logs.append({"parse": "claims" if _code == '837' else 'remits', "completed": True, "name": filename, "rate": rate})
        return claims, logs, _code

    def run(self):
        if self.emit.pre:
            self.emit.pre()
        for filename in self.files:
            content, logs, _code = self.read(filename)
            self.finish(content, logs, _code)

    def finish(self, content, logs, _code):
        args = self.store
        _args = json.loads(json.dumps(self.store))
        ISNEW_MONGO = 'provider' in args and args['provider'] in ['mongo', 'mongodb']
        ISLEG_MONGO = ('type' in args and args['type'] == 'mongo.MongoWriter')
        if ISLEG_MONGO or ISNEW_MONGO:
            if ISLEG_MONGO:
                # Legacy specification ...
                args['args']['doc'] = 'claims' if _code == '837' else 'remits'
                _args['args']['doc'] = 'logs'
            else:
                args['doc'] = 'claims' if _code == '837' else 'remits'
                _args['doc'] = 'logs'
        else:
            if 'type' in args:
                # Legacy specification ...
                args['args']['table'] = 'claims' if _code == '837' else 'remits'
                _args['args']['table'] = 'logs'
                table = args['args']['table']
            else:
                args['table'] = 'claims' if _code == '837' else 'remits'
                _args['table'] = 'logs'
                table = args['table']
        writer = transport.factory.instance(**args)
        IS_SQLITE = type(writer) == transport.disk.SQLiteWriter
        if content:
            if IS_SQLITE:
                for row in content:
                    writer.apply("""insert into :table(data) values (':values')""".replace(":values", json.dumps(row)).replace(":table", table))
            else:
                writer.write(content)
            writer.close()
        if logs:
            logger = transport.factory.instance(**_args)
            if IS_SQLITE:
                for row in logs:
                    logger.apply("""insert into logs values (':values')""".replace(":values", json.dumps(row)))
            else:
                logger.write(logs)
            logger.close()
        if self.emit.post:
            self.emit.post(content, logs)
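

#
# Embedded usage: a minimal sketch (not part of the original module). The configuration
# path and the x12 file name below are hypothetical placeholders.
#
if __name__ == '__main__':
    CONFIG_PATH = os.sep.join([os.environ['HOME'], '.healthcareio', 'config.json'])
    parser = Parser(CONFIG_PATH)
    parser.set.files(['sample-claim.x12'])  # one or more 835/837 files to parse
    parser.start()                          # Process.start() -> run() -> read() then finish()
    parser.join()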