__init__.py

  1. """
  2. (c) 2019 Healthcare/IO 1.0
  3. Vanderbilt University Medical Center, Health Information Privacy Laboratory
  4. https://hiplab.mc.vanderbilt.edu/healthcareio
  5. Authors:
  6. Khanhly Nguyen,
  7. Steve L. Nyemba<steve.l.nyemba@vanderbilt.edu>
  8. License:
  9. MIT, terms are available at https://opensource.org/licenses/MIT
  10. This parser was originally written by Khanhly Nguyen for her internship and is intended to parse x12 835,837 and others provided the appropriate configuration
  11. USAGE :
  12. - COMMAND LINE
  13. - EMBEDDED
  14. """
# import hashlib
# import json
# import os
# import sys
# # version 2.0
# # import util
# # from parser import X12Parser
# #-- end
# from itertools import islice
# from multiprocessing import Process
# import transport
# from transport import providers
# import jsonmerge
# # import plugins
# import copy
# class void :
#     pass
# class Formatters :
#     def __init__(self):
#         # self.config = config
#         self.get = void()
#         self.get.config = self.get_config
#         self.parse = void()
#         self.parse.sv3 = self.sv3
#         self.parse.sv2 = self.sv2
#         self.sv2_parser = self.sv2
#         self.sv3_parser = self.sv3
#         self.sv3_parse = self.sv3
#         self.format_proc = self.procedure
#         self.format_diag = self.diagnosis
#         self.parse.procedure = self.procedure
#         self.parse.diagnosis = self.diagnosis
#         self.parse.date = self.date
#         self.format_date = self.date
#         self.format_pos = self.pos
#         self.format_time = self.time
#     def split(self,row,sep='*',prefix='HI') :
#         """
#         This function is designed to split an x12 row into its component values, handling the '>' sub-delimiter and composite segments (e.g. HI)
#         """
#         value = []
#         if row.startswith(prefix) is False:
#             for row_value in row.replace('~','').split(sep) :
#                 if '>' in row_value and not row_value.startswith('HC'):
#                     # if row_value.startswith('HC') or row_value.startswith('AD'):
#                     if row_value.startswith('AD'):
#                         value += row_value.split('>')[:2]
#                         pass
#                     else:
#                         value += [row_value]
#                         # value += row_value.split('>') if row.startswith('CLM') is False else [row_value]
#                 else :
#                     value.append(row_value.replace('\n',''))
#             value = [xchar.replace('\r','') for xchar in value] #row.replace('~','').split(sep)
#         else:
#             value = [ [prefix]+ self.split(item,'>') for item in row.replace('~','').split(sep)[1:] ]
#         return value if type(value) == list and type(value[0]) != list else value[0]
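#
#     Worked examples of split(), using made-up segments (values are illustrative only):
#         Formatters().split('SV2*0300*HC:80053*25.00*UN*1~')
#         # -> ['SV2', '0300', 'HC:80053', '25.00', 'UN', '1']
#         Formatters().split('HI*ABK>R079~')               # composite segment, handled by the 'prefix' branch
#         # -> ['HI', 'ABK', 'R079']
#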
#     def get_config(self,config,row):
#         """
#         This function will return the meaningful parts of the configuration for a given item
#         """
#         _row = list(row) if type(row[0]) == str else list(row[0])
#         _info = config[_row[0]] if _row[0] in config else {}
#         _rinfo = {}
#         key = None
#         if '@ref' in _info:
#             keys = list(set(_row) & set(_info['@ref'].keys()))
#             if keys :
#                 _rinfo = {}
#                 for key in keys :
#                     _rinfo = jsonmerge.merge(_rinfo,_info['@ref'][key])
#                 return _rinfo
#                 # key = key[0]
#                 # return _info['@ref'][key]
#             else:
#                 return {}
#         if not _info and 'SIMILAR' in config:
#             #
#             # Let's look for the nearest key using the edit distance
#             if _row[0] in config['SIMILAR'] :
#                 key = config['SIMILAR'][_row[0]]
#                 _info = config[key]
#         return _info
#     def hash(self,value):
#         salt = os.environ['HEALTHCAREIO_SALT'] if 'HEALTHCAREIO_SALT' in os.environ else ''
#         _value = str(value)+ salt
#         if sys.version_info[0] > 2 :
#             return hashlib.md5(_value.encode('utf-8')).hexdigest()
#         else:
#             return hashlib.md5(_value).hexdigest()
#     def suppress (self,value):
#         return 'N/A'
#     def date(self,value):
#         value = value if type(value) != list else "-".join(value)
#         if len(value) > 8 or '-' in value:
#             #
#             # This is the case of a thru date i.e. the first part should be provided in a 435 entry
#             #
#             fdate = "-".join([value[:8][:4],value[:8][4:6],value[:8][6:8]])
#             tdate = "-".join([value[9:][:4],value[9:][4:6],value[9:][6:8]])
#             return {"from":fdate,"to":tdate}
#         if len(value) == 8 :
#             year = value[:4]
#             month = value[4:6]
#             day = value[6:]
#             return "-".join([year,month,day])[:10] #{"year":year,"month":month,"day":day}
#         elif len(value) == 6 :
#             year = '20' + value[:2]
#             month = value[2:4]
#             day = value[4:]
#         elif value.isnumeric() and len(value) >= 10:
#             #
#             # Here we assume we have a numeric value
#             year = value[:4]
#             month = value[4:6]
#             day = value[6:8]
#         else:
#             #
#             # We have a date formatting issue
#             return value
#         return "-".join([year,month,day])
#     def time(self,value):
#         pass
#     def sv3(self,value):
#         if '>' in value[1]:
#             terms = value[1].split('>')
#             return {'type':terms[0],'code':terms[1],"amount":float(value[2])}
#         else:
#             return {"code":value[2],"type":value[1],"amount":float(value[3])}
#     def sv2(self,value):
#         #
#         # @TODO: Sometimes there's a suffix (need to inventory all the variations)
#         #
#         if '>' in value or ':' in value:
#             xchar = '>' if '>' in value else ':'
#             _values = value.split(xchar)
#             modifier = {}
#             if len(_values) > 2 :
#                 modifier = {"code":_values[2]}
#                 if len(_values) > 3 :
#                     modifier['type'] = _values[3]
#             _value = {"code":_values[1],"type":_values[0]}
#             if modifier :
#                 _value['modifier'] = modifier
#             return _value
#         else:
#             return value
#     def procedure(self,value):
#         for xchar in [':','<','|','>'] :
#             if xchar in value and len(value.split(xchar)) > 1 :
#                 #_value = {"type":value.split(':')[0].strip(),"code":value.split(':')[1].strip()}
#                 _value = {"type":value.split(xchar)[0].strip(),"code":value.split(xchar)[1].strip()}
#                 if len(value.split(xchar)) > 2 :
#                     index = 1;
#                     for modifier in value.split(xchar)[2:] :
#                         _value['modifier_'+str(index)] = modifier
#                         index += 1
#                 break
#             else:
#                 _value = str(value)
#         return _value
#     def diagnosis(self,value):
#         return [ {"code":item[2], "type":item[1]} for item in value if len(item) > 1]
#     def parse_loc(self,value):
#         if ':' in value :
#             return dict(zip(['place_of_service','claim_indicator','claim_frequency'],value.split(':')))
#     def pos(self,value):
#         """
#         formatting place of service information within a segment (REF)
#         @TODO: In order to accommodate the other elements they need to be specified in the configuration
#         Otherwise it causes problems on export
#         """
#         xchar = '>' if '>' in value else ':'
#         x = value.split(xchar)
#         x = {"place_of_service":x[0],"indicator":x[1],"frequency":x[2]} if len(x) == 3 else {"place_of_service":x[0],"indicator":None,"frequency":None}
#         return x
# class Parser (Process):
#     @staticmethod
#     def setup (path):
#         # self.config = _config['parser']
#         config = json.loads(open(path).read())
#         _config = config['parser']
#         #
#         # The parser configuration may need some editing; this allows for ease of development and the use of alternate configurations
#         #
#         if type(_config['837']) == str or type(_config['835']) == str :
#             for _id in ['837','835'] :
#                 if type(_config[_id]) == str and os.path.exists(_config[_id]):
#                     _config[_id] = json.loads(open(_config[_id]).read())
#                 if type(_config[_id]) == dict :
#                     _config[_id] = [_config[_id]]
#         config['parser'] = _config
#         return config
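#
#     A sketch of the configuration shape that setup()/init() expect, inferred from the code in this
#     module; keys are shown with placeholder values and the actual files may carry additional entries:
#
#         {
#           "parser": {
#             "837": "/path/to/837.json",      # a path, an inline rule dict, or a list of rule dicts
#             "835": "/path/to/835.json"
#           },
#           "store":      {"provider": "sqlite", "...": "..."},   # arguments handed to transport
#           "out-folder": "/path/to/output",
#           "schema":     {"835": {"create": "..."}, "837": {"create": "..."}, "logs": {"create": "..."}}
#         }
#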
#     @staticmethod
#     def init(**_args):
#         """
#         This function initializes the database that will store the claims, if need be
#         :path   configuration file
#         """
#         PATH = os.sep.join([os.environ['HOME'],'.healthcareio'])
#         filename = os.sep.join([PATH,'config.json'])
#         filename = _args['path'] if 'path' in _args else filename
#         info = None
#         if os.path.exists(filename):
#             #
#             # Loading the configuration file (JSON format)
#             file = open(filename)
#             info = json.loads(file.read())
#             OUTPUT_FOLDER = info['out-folder']
#             if 'output-folder' not in info and not os.path.exists(OUTPUT_FOLDER) :
#                 os.mkdir(OUTPUT_FOLDER)
#             elif 'output-folder' in info and not os.path.exists(info['out-folder']) :
#                 os.mkdir(info['out-folder'])
#             # if 'type' in info['store'] :
#             lwriter = None
#             IS_SQL = False
#             if 'type' in info['store'] and info['store']['type'] == 'disk.SQLiteWriter' :
#                 lwriter = transport.factory.instance(**info['store'])
#                 IS_SQL = True
#             elif 'provider' in info['store'] and info['store']['provider'] == 'sqlite' :
#                 lwriter = transport.instance(**info['store']) ;
#                 IS_SQL = [providers.SQLITE,providers.POSTGRESQL,providers.NETEZZA,providers.MYSQL,providers.MARIADB]
#             if lwriter and IS_SQL:
#                 for key in info['schema'] :
#                     if key != 'logs' :
#                         _id = 'claims' if key == '837' else 'remits'
#                     else:
#                         _id = key
#                     if not lwriter.has(table=_id) :
#                         lwriter.apply(info['schema'][key]['create'])
#                     # [lwriter.apply( info['schema'][key]['create']) for key in info['schema'] if not lwriter.has(table=key)]
#                 lwriter.close()
#         return info
#     def __init__(self,path):
#         """
#         :path   path of the configuration file (it can be absolute)
#         """
#         Process.__init__(self)
#         self.utils = Formatters()
#         self.get = void()
#         self.get.value = self.get_map
#         self.get.default_value = self.get_default_value
#         # _config = json.loads(open(path).read())
#         self._custom_config = self.get_custom(path)
#         # self.config = _config['parser']
#         # #
#         # # The parser configuration may need some editing; this allows for ease of development and the use of alternate configurations
#         # #
#         # if type(self.config['837']) == str or type(self.config['835']) == str :
#         #     for _id in ['837','835'] :
#         #         if type(self.config[_id]) == str:
#         #             self.config[_id] = json.loads(open(self.config[_id]).read())
#         #         if type(self.config[_id]) == dict :
#         #             self.config[_id] = [self.config[_id]]
#         _config = Parser.setup(path)
#         self.config = _config['parser']
#         self.store = _config['store']
#         self.cache = {}
#         self.files = []
#         self.set = void()
#         self.set.files = self.set_files
#         self.emit = void()
#         self.emit.pre = None
#         self.emit.post = None
#     def get_custom(self,path) :
#         """
#         :path   path of the configuration file (it can be absolute)
#         """
#         #
#         #
#         _path = path.replace('config.json','')
#         if _path.endswith(os.sep) :
#             _path = _path[:-1]
#         _config = {}
#         _path = os.sep.join([_path,'custom'])
#         if os.path.exists(_path) :
#             files = os.listdir(_path)
#             if files :
#                 fullname = os.sep.join([_path,files[0]])
#                 _config = json.loads ( (open(fullname)).read() )
#         return _config
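#
#     get_custom() looks for a sibling 'custom' folder next to config.json and loads the first file
#     found there, e.g. (layout assumed for illustration):
#         ~/.healthcareio/config.json
#         ~/.healthcareio/custom/overrides.json     # merged with the stock 835/837 rules in apply()
#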
#     def set_files(self,files):
#         self.files = files
#     def get_map(self,row,config,version=None):
#         # label = config['label'] if 'label' in config else None
#         handler = Formatters()
#         if 'map' not in config and hasattr(handler,config['apply']):
#             pointer = getattr(handler,config['apply'])
#             object_value = pointer(row)
#             return object_value
#         #
#         # Pull the goto configuration that skips rows
#         #
#         omap = config['map'] if not version or version not in config else config[version]
#         anchors = config['anchors'] if 'anchors' in config else []
#         rewrite = config['rewrite'] if 'rewrite' in config else {}
#         if len(row) == 2 and row[0] == 'HI' :
#             row = ([row[0]] + row[1].split(':'))
#         if type(row[0]) == str:
#             object_value = {}
#             for key in omap :
#                 index = omap[key]
#                 if anchors and set(anchors) & set(row):
#                     _key = list(set(anchors) & set(row))[0]
#                     aindex = row.index(_key)
#                     index = aindex + index
#                 if index < len(row) :
#                     value = row[index]
#                     if 'cast' in config and key in config['cast'] and value.strip() != '' :
#                         if config['cast'][key] in ['float','int']:
#                             try:
#                                 value = eval(config['cast'][key])(value)
#                             except Exception as e:
#                                 pass
#                                 #
#                                 # Sometimes shit hits the fan when the anchor is missing
#                                 # This is typical but using the hardened function helps circumvent this (SV2,SV3)
#                                 #
#                         elif hasattr(handler,config['cast'][key]):
#                             pointer = getattr(handler,config['cast'][key])
#                             value = pointer(value)
#                         else:
#                             print ("Missing Pointer ",key,config['cast'])
#                     if type(value) == dict :
#                         for objkey in value :
#                             if type(value[objkey]) == dict :
#                                 continue
#                             if 'syn' in config and value[objkey] in config['syn'] :
#                                 # value[objkey] = config['syn'][ value[objkey]]
#                                 pass
#                         if key in rewrite :
#                             _key = rewrite[key]
#                             if _key in value :
#                                 value = value[_key]
#                             else:
#                                 value = ""
#                         value = {key:value} if key not in value else value
#                     else:
#                         if 'syn' in config and value in config['syn'] :
#                             # value = config['syn'][value]
#                             pass
#                     if type(value) == dict :
#                         object_value = jsonmerge.merge(object_value, value)
#                     else:
#                         object_value[key] = value
#         else:
#             #
#             # we are dealing with a complex object
#             object_value = []
#             for row_item in row :
#                 value = self.get.value(row_item,config,version)
#                 object_value.append(value)
#         return object_value
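#
#     A sketch of how get_map() applies a configuration entry to a split row. The segment, field
#     names and offsets are made up for illustration; the real entries live in the 835/837 rule files:
#
#         row     = ['CLM', '12345', '150']                       # output of Formatters.split()
#         config  = {"map": {"claim_id": 1, "claim_amount": 2},
#                    "cast": {"claim_amount": "float"},
#                    "label": "claim"}
#         self.get.value(row, config)                             # -> {'claim_id': '12345', 'claim_amount': 150.0}
#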
#     def set_cache(self,tmp,_info) :
#         """
#         insert a value into the cache; these entries are referenced later within a loop
#         """
#         if 'cache' in _info :
#             key = _info['cache']['key']
#             value = _info['cache']['value']
#             field = _info['cache']['field']
#             if value in tmp :
#                 self.cache[key] = {field:tmp[value]}
#             pass
#     def get_cache(self,row) :
#         """
#         retrieve the cached element for the current row, if any
#         """
#         key = row[0]
#         return self.cache[key] if key in self.cache else {}
#     def apply(self,content,_code) :
#         """
#         :content    content of a file i.e a segment within the envelope
#         :_code      837 or 835 (helps get the appropriate configuration)
#         """
#         util = Formatters()
#         # header = default_value.copy()
#         value = {}
#         for row in content[:] :
#             row = util.split(row.replace('\n','').replace('~',''))
#             _info = util.get.config(self.config[_code][0],row)
#             if self._custom_config and _code in self._custom_config:
#                 _cinfo = util.get.config(self._custom_config[_code],row)
#             else:
#                 _cinfo = {}
#             if _info or _cinfo:
#                 try:
#                     _info = jsonmerge.merge(_info,_cinfo)
#                     tmp = self.get.value(row,_info)
#                     if not tmp :
#                         continue
#                     #
#                     # At this point we have the configuration and the row parsed into values
#                     # We should check to see if we don't have anything in the cache to be added to it
#                     #
#                     if row[0] in self.cache :
#                         tmp = jsonmerge.merge(tmp,self.get_cache(row))
#                     if 'label' in _info :
#                         label = _info['label']
#                         if type(tmp) == list :
#                             value[label] = tmp if label not in value else value[label] + tmp
#                         else:
#                             # if 'DTM' in row :
#                             #     print ([label,tmp,label in value])
#                             if label not in value :
#                                 value[label] = []
#                             value[label].append(tmp)
#                             # if label not in value:
#                             #     value[label] = [tmp]
#                             # else:
#                             #     value[label].append(tmp)
#                             if '_index' not in tmp :
#                                 #
#                                 # In case we asked it to be overridden, then this will not apply
#                                 # X12 occasionally requires references to other elements in a loop (alas)
#                                 #
#                                 tmp['_index'] = len(value[label]) - 1
#                     elif 'field' in _info :
#                         name = _info['field']
#                         # value[name] = tmp
#                         # value = jsonmerge.merge(value,{name:tmp})
#                         if name not in value :
#                             value = dict(value,**{name:tmp})
#                         else:
#                             value[name] = dict(value[name],**tmp)
#                     else:
#                         value = dict(value,**tmp)
#                     pass
#                 except Exception as e :
#                     print (e.args[0])
#                     # print ('__',(dir(e.args)))
#                     pass
#                 #
#                 # At this point the object is completely built,
#                 # if there are any attributes to be cached it will be done here
#                 #
#                 if 'cache' in _info :
#                     self.set_cache(tmp,_info)
#         return value if value else {}
#     def get_default_value(self,content,_code):
#         util = Formatters()
#         TOP_ROW = content[1].split('*')
#         SUBMITTED_DATE = util.parse.date(TOP_ROW[4])
#         CATEGORY = content[2].split('*')[1].strip()
#         VERSION = content[1].split('*')[-1].replace('~','').replace('\n','')
#         SENDER_ID = TOP_ROW[2]
#         row = util.split(content[3])
#         _info = util.get_config(self.config[_code][0],row)
#         value = self.get.value(row,_info,VERSION) if _info else {}
#         value['category'] = {"setid": _code,"version":'X'+VERSION.split('X')[1],"id":VERSION.split('X')[0].strip()}
#         value["submitted"] = SUBMITTED_DATE
#         value['sender_id'] = SENDER_ID
#         # value = dict(value,**self.apply(content,_code))
#         value = jsonmerge.merge(value,self.apply(content,_code))
#         # Let's parse this for default values
#         return value #jsonmerge.merge(value,self.apply(content,_code))
#     def read(self,filename) :
#         """
#         :formerly get_content
#         This function returns the parsed content of an EDI file given the configuration specified; it is capable of identifying the file type from its content
#         :section    loop prefix (HL, CLP)
#         :config     configuration with formatting rules, labels ...
#         :filename   location of the file
#         """
#         # section = section if section else config['SECTION']
#         logs = []
#         claims = []
#         _code = 'UNKNOWN'
#         try:
#             self.cache = {}
#             file = open(filename.strip())
#             file = file.read().split('CLP')
#             _code = '835'
#             section = 'CLP'
#             if len(file) == 1 :
#                 file = file[0].split('CLM') #.split('HL')
#                 _code = '837'
#                 section = 'CLM' #'HL'
#             INITIAL_ROWS = file[0].split(section)[0].split('\n')
#             if len(INITIAL_ROWS) == 1 :
#                 INITIAL_ROWS = INITIAL_ROWS[0].split('~')
#             # for item in file[1:] :
#             #     item = item.replace('~','\n')
#             # print (INITIAL_ROWS)
#             DEFAULT_VALUE = self.get.default_value(INITIAL_ROWS,_code)
#             DEFAULT_VALUE['name'] = filename.strip()
#             file = section.join(file).split('\n')
#             if len(file) == 1:
#                 file = file[0].split('~')
#             #
#             # In the initial rows, there's redundant information (so much for the x12 standard)
#             # index 1 identifies file type i.e CLM for claim and CLP for remittance
#             segment = []
#             index = 0;
#             _toprows = []
#             _default = None
#             for row in file :
#                 row = row.replace('\r','')
#                 # if not segment and not row.startswith(section):
#                 #     _toprows += [row]
#                 if row.startswith(section) and not segment:
#                     segment = [row]
#                     continue
#                 elif segment and not row.startswith(section):
#                     segment.append(row)
#                 if len(segment) > 1 and row.startswith(section):
#                     #
#                     # process the segment somewhere (create a thread maybe?)
#                     #
#                     _claim = self.apply(segment,_code)
#                     if _claim :
#                         _claim['index'] = index #len(claims)
#                         # claims.append(dict(DEFAULT_VALUE,**_claim))
#                         #
#                         # schema = [ {key:{"mergeStrategy":"append" if list( type(_claim[key])) else "overwrite"}} for key in _claim.keys()] # if type(_claim[key]) == list]
#                         # _schema = set(DEFAULT_VALUE.keys()) - schema
#                         # if schema :
#                         #     schema = {"properties":dict.fromkeys(schema,{"mergeStrategy":"append"})}
#                         # else:
#                         #     schema = {"properties":{}}
#                         # schema = jsonmerge.merge(schema['properties'],dict.fromkeys(_schema,{"mergeStrategy":"overwrite"}))
#                         schema = {"properties":{}}
#                         for attr in _claim.keys() :
#                             schema['properties'][attr] = {"mergeStrategy": "append" if type(_claim[attr]) == list else "overwrite" }
#                         merger = jsonmerge.Merger(schema)
#                         _baseclaim = None
#                         _baseclaim = merger.merge(_baseclaim,copy.deepcopy(DEFAULT_VALUE))
#                         _claim = merger.merge(_baseclaim,_claim)
#                         # _claim = merger.merge(DEFAULT_VALUE.copy(),_claim)
#                         claims.append( _claim)
#                     segment = [row]
#                     index += 1
#                     pass
#             #
#             # Handling the last claim found
#             if segment and segment[0].startswith(section) :
#                 # default_claim = dict({"name":index},**DEFAULT_VALUE)
#                 claim = self.apply(segment,_code)
#                 if claim :
#                     claim['index'] = len(claims)
#                     # schema = [key for key in claim.keys() if type(claim[key]) == list]
#                     # if schema :
#                     #     schema = {"properties":dict.fromkeys(schema,{"mergeStrategy":"append"})}
#                     # else:
#                     #     print (claim.keys())
#                     #     schema = {}
#                     #
#                     # @TODO: Fix merger related to schema (drops certain fields ... NOT cool)
#                     # merger = jsonmerge.Merger(schema)
#                     # top_row_claim = self.apply(_toprows,_code)
#                     # claim = merger.merge(claim,self.apply(_toprows,_code))
#                     # claims.append(dict(DEFAULT_VALUE,**claim))
#                     schema = {"properties":{}}
#                     for attr in claim.keys() :
#                         schema['properties'][attr] = {"mergeStrategy": "append" if type(claim[attr]) == list else "overwrite" }
#                     merger = jsonmerge.Merger(schema)
#                     _baseclaim = None
#                     _baseclaim = merger.merge(_baseclaim,copy.deepcopy(DEFAULT_VALUE))
#                     claim = merger.merge(_baseclaim,claim)
#                     claims.append(claim)
#                     # claims.append(merger.merge(DEFAULT_VALUE.copy(),claim))
#             if type(file) != list :
#                 file.close()
#             # x12_file = open(filename.strip(),errors='ignore').read().split('\n')
#         except Exception as e:
#             logs.append ({"parse":_code,"completed":False,"name":filename,"msg":e.args[0]})
#             return [],logs,None
#         rate = 0 if len(claims) == 0 else (1 + index)/len(claims)
#         logs.append ({"parse":"claims" if _code == '837' else 'remits',"completed":True,"name":filename,"rate":rate})
#         # self.finish(claims,logs,_code)
#         return claims,logs,_code
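#
#     A sketch of what read() hands back (shapes inferred from the code above; values illustrative):
#         claims, logs, _code = parser.read('/data/claims/sample.837')
#         # claims -> list of dicts, each merged with the file-level DEFAULT_VALUE and carrying an 'index'
#         # logs   -> e.g. [{"parse": "claims", "completed": True, "name": "...", "rate": 1.0}]
#         # _code  -> '837' or '835' (None when parsing fails)
#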
#     def run(self):
#         if self.emit.pre :
#             self.emit.pre()
#         for filename in self.files :
#             content,logs,_code = self.read(filename)
#             self.finish(content,logs,_code)
#     def finish(self,content,logs,_code) :
#         args = self.store
#         _args = json.loads(json.dumps(self.store))
#         ISNEW_MONGO = 'provider' in args and args['provider'] in ['mongo', 'mongodb']
#         ISLEG_MONGO = ('type' in args and args['type'] == 'mongo.MongoWriter')
#         if ISLEG_MONGO or ISNEW_MONGO:
#             if ISLEG_MONGO:
#                 # Legacy specification ...
#                 args['args']['doc'] = 'claims' if _code == '837' else 'remits'
#                 _args['args']['doc'] = 'logs'
#             else:
#                 args['doc'] = 'claims' if _code == '837' else 'remits'
#                 _args['doc'] = 'logs'
#         else:
#             if 'type' in args :
#                 # Legacy specification ...
#                 args['args']['table'] = 'claims' if _code == '837' else 'remits'
#                 _args['args']['table'] = 'logs'
#                 table = args['args']['table']
#             else:
#                 args['table'] = 'claims' if _code == '837' else 'remits'
#                 _args['table'] = 'logs'
#                 table = args['table']
#         writer = transport.factory.instance(**args)
#         IS_SQLITE = type(writer) == transport.disk.SQLiteWriter
#         if content:
#             if IS_SQLITE :
#                 for row in content :
#                     writer.apply("""insert into :table(data) values (':values')""".replace(":values",json.dumps(row)).replace(":table",table) )
#             else:
#                 writer.write(content)
#             writer.close()
#         if logs :
#             logger = transport.factory.instance(**_args)
#             if IS_SQLITE:
#                 for row in logs:
#                     logger.apply("""insert into logs values (':values')""".replace(":values",json.dumps(row)))
#             else:
#                 logger.write(logs)
#             logger.close()
#         if self.emit.post :
#             self.emit.post(content,logs)
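#
#     The two families of store configuration that finish() accepts, per the checks above (values illustrative):
#         {"provider": "mongodb", "db": "healthcareio", "...": "..."}               # new style: claims/remits/logs routed as "doc" targets
#         {"type": "disk.SQLiteWriter", "args": {"path": "...", "table": "..."}}    # legacy style: claims/remits/logs routed as "table" targets
#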