datax-sync-template-gen.py

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
PG → HDFS DataX sync ini template generator + raw-modeling metadata table + table probe.

A single run produces three artifacts:

1. PG table probe section (estimated row count + anchor-field maintenance quality +
   soft-delete field hits), placed at the top of the md
2. Full-column PG metadata markdown table (No. / column name / Chinese name / data type /
   PK flag / mask type) -- used for the kb/24 raw modeling doc
3. Full-column sync ini template -- developers trim columns per the md discussion,
   adjust where, add [mask], tune splitPk, change the writer.path table-name suffix, etc.,
   then commit it to jobs/raw/{domain}/

CLI:
    python3 bin/datax-sync-template-gen.py \\
        -ds postgresql/prod-hobby \\
        -t public.card_group_order_info \\
        [-mask-conf <PATH>] [-o [DIR]]

Arguments:
    -ds          data source ref, of the form {db_type}/{env}-{instance alias} (same format
                 as the dataSource field in sync ini). Only postgresql is supported for now.
    -t           schema-qualified table name (e.g. public.card_group_order_info).
    -mask-conf   path to the mask config ini ({table}.mask.ini, optional). When given, trim
                 columns are dropped per the config, the [mask] section is rendered, and the
                 mask-type column in the md is filled in; when omitted, all columns are
                 output and the mask-type column stays blank. **A missing file is a hard error.**
    -o           output directory (optional; stdout prints md + ini in all three modes):
                 - omitted: stdout only
                 - -o without a value: stdout + write workspace/{yyyymmdd}/{table}.{md,ini}
                 - -o <DIR>: stdout + write <DIR>/{table}.{md,ini}
"""
import argparse
import os
import re
import sys
from configparser import ConfigParser
from datetime import datetime

project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)

from dw_base.datax.datasources.data_source_factory import DataSourceFactory
from dw_base.datax.datax_constants import DS_POSTGRE_SQL_JDBC_URL

WORKSPACE_DEFAULT = os.path.join(
    project_root, 'workspace', datetime.now().strftime('%Y%m%d'),
)
# Probe hard-coding: the standard anchor fields for incremental sync
# (the backend naming convention being rolled out).
ANCHOR_FIELDS = ('create_time', 'update_time')
# Recent window: take the latest N rows by PK descending to check whether
# update_time has been continuously maintained recently.
PROBE_RECENT_LIMIT = 100


def resolve_datasource(ds_ref):
    """Reuse the ref -> DataSource resolution logic from plugin.py:34-42.

    ds_ref looks like 'postgresql/prod-hobby'; the first segment is the db_type
    (same as the parent directory name).
    The datasource ini lives next to the project root: ../datasource/{ds_ref}.ini.
    """
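    # Example: ds_ref 'postgresql/prod-hobby' resolves to
    # <project_root>/../datasource/postgresql/prod-hobby.ini.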
    ds_type = ds_ref.split('/')[0]
    if ds_type != 'postgresql':
        raise NotImplementedError('暂只支持 postgresql 数据源,收到: ' + ds_type)
    ds_file_path = os.path.normpath(
        os.path.join(project_root, '..', 'datasource', ds_ref + '.ini'))
    if not os.path.isfile(ds_file_path):
        raise FileNotFoundError('数据源 ini 不存在: ' + ds_file_path)
    return DataSourceFactory.get_data_source(ds_type, ds_file_path)


def parse_jdbc_url(jdbc_url):
    """Extract (host, port, database) from jdbc:postgresql://host:port/database."""
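    # Illustrative only (host/db names are made up):
    #   'jdbc:postgresql://10.1.2.3:5432/hobby' -> ('10.1.2.3', 5432, 'hobby')
    # The port falls back to 5432 when the URL omits it.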
    m = re.match(r'jdbc:postgresql://([^:/]+)(?::(\d+))?/(.+)', jdbc_url)
    if not m:
        raise ValueError('无法解析 PG jdbcUrl: ' + jdbc_url)
    return m.group(1), int(m.group(2) or 5432), m.group(3)


def query_columns_full(conn, schema, table):
    """Full-column metadata query with ordinal / type / PK flag, ordered by attnum.

    Returns [(attnum, attname, comment, pg_type, pk_flag), ...]
    """
    cur = conn.cursor()
    cur.execute("""
        SELECT
            a.attnum,
            a.attname,
            pg_catalog.col_description(a.attrelid, a.attnum),
            pg_catalog.format_type(a.atttypid, a.atttypmod),
            CASE WHEN EXISTS (
                SELECT 1 FROM pg_index i
                WHERE i.indrelid = a.attrelid AND i.indisprimary
                  AND a.attnum = ANY(i.indkey)
            ) THEN 'PK' ELSE '' END
        FROM pg_catalog.pg_attribute a
        JOIN pg_catalog.pg_class c ON a.attrelid = c.oid
        JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
        WHERE n.nspname = %s AND c.relname = %s
          AND a.attnum > 0 AND NOT a.attisdropped
        ORDER BY a.attnum
    """, (schema, table))
    return cur.fetchall()


def probe_table(conn, schema, table, full_rows):
    """Probe the table: row count + PK + anchors (range in PK order / earliest
    non-null update_time / recent continuity) + soft-delete hits.

    - row count: pg_class.reltuples estimate
    - PK: single / composite / none, plus auto-increment or not
      (attidentity and a nextval default expression both count)
    - create_time range in PK order: only with a single auto-increment PK and an
      existing create_time; ORDER BY pk ASC/DESC LIMIT 1
    - earliest non-null update_time: min(update_time) over the whole table,
      guarded by a 60s statement_timeout
    - recent update_time continuity: with a single PK, ORDER BY pk DESC LIMIT 100
      to check whether recent rows are continuously maintained
    - soft delete: filter full_rows for a 'del' substring (case-insensitive)
    """
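    # Row-count estimate comes straight from pg_class.reltuples (maintained by
    # VACUUM/ANALYZE), so no COUNT(*) full scan is issued.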
    cur = conn.cursor()
    cur.execute("""
        SELECT c.reltuples::bigint
        FROM pg_catalog.pg_class c
        JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
        WHERE n.nspname = %s AND c.relname = %s
    """, (schema, table))
    row = cur.fetchone()
    reltuples = int(row[0]) if row and row[0] is not None else 0
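    # Single-column PK only: treat it as auto-increment when it is an identity
    # column (attidentity 'a'/'d') or its default expression contains nextval().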
    pk_cols = [r[1] for r in full_rows if r[4] == 'PK']
    pk_auto_increment = False
    if len(pk_cols) == 1:
        cur.execute("""
            SELECT a.attidentity, pg_get_expr(ad.adbin, ad.adrelid)
            FROM pg_attribute a
            LEFT JOIN pg_attrdef ad ON ad.adrelid = a.attrelid AND ad.adnum = a.attnum
            JOIN pg_class c ON c.oid = a.attrelid
            JOIN pg_namespace n ON n.oid = c.relnamespace
            WHERE n.nspname = %s AND c.relname = %s AND a.attname = %s
        """, (schema, table, pk_cols[0]))
        r = cur.fetchone()
        if r:
            attidentity, default_expr = r[0], r[1]
            pk_auto_increment = (
                attidentity in ('a', 'd')
                or (default_expr is not None and 'nextval' in default_expr.lower())
            )
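    # create_time at both ends of the PK order; ORDER BY pk ASC/DESC LIMIT 1 lets
    # Postgres walk the PK index instead of scanning (assuming the usual btree PK).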
    field_names = {r[1] for r in full_rows}
    create_exists = 'create_time' in field_names
    update_exists = 'update_time' in field_names
    create_earliest = None
    create_latest = None
    if pk_auto_increment and create_exists:
        cur.execute(
            'SELECT '
            '(SELECT create_time FROM "{s}"."{t}" ORDER BY "{p}" ASC LIMIT 1), '
            '(SELECT create_time FROM "{s}"."{t}" ORDER BY "{p}" DESC LIMIT 1)'
            .format(s=schema, t=table, p=pk_cols[0]))
        r = cur.fetchone()
        create_earliest, create_latest = r[0], r[1]
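    # Earliest non-null update_time over the whole table, capped by a 60s
    # statement_timeout; on timeout/error, roll back and just record the flag.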
    update_earliest = None
    update_earliest_timeout = False
    if update_exists:
        try:
            cur.execute("SET statement_timeout = '60s'")
            cur.execute('SELECT min(update_time) FROM "{s}"."{t}"'.format(
                s=schema, t=table))
            update_earliest = cur.fetchone()[0]
        except Exception:
            update_earliest_timeout = True
            try:
                conn.rollback()
            except Exception:
                pass
        try:
            cur.execute("RESET statement_timeout")
        except Exception:
            try:
                conn.rollback()
                cur.execute("RESET statement_timeout")
            except Exception:
                pass
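    # Recent-window check: with a single PK, count non-null update_time among the
    # latest PROBE_RECENT_LIMIT rows (PK descending) to gauge recent maintenance.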
    recent_total = None
    recent_update_notnull = None
    if len(pk_cols) == 1 and update_exists:
        cur.execute(
            'SELECT count(*), count("update_time") FROM '
            '(SELECT update_time FROM "{s}"."{t}" ORDER BY "{p}" DESC LIMIT {n}) AS sub'
            .format(s=schema, t=table, p=pk_cols[0], n=PROBE_RECENT_LIMIT))
        r = cur.fetchone()
        recent_total = int(r[0])
        recent_update_notnull = int(r[1])
    del_candidates = sorted(r[1] for r in full_rows if 'del' in r[1].lower())
    return {
        'reltuples': reltuples,
        'pk_cols': pk_cols,
        'pk_auto_increment': pk_auto_increment,
        'create_exists': create_exists,
        'create_earliest': create_earliest,
        'create_latest': create_latest,
        'update_exists': update_exists,
        'update_earliest': update_earliest,
        'update_earliest_timeout': update_earliest_timeout,
        'recent_total': recent_total,
        'recent_update_notnull': recent_update_notnull,
        'del_candidates': del_candidates,
    }


def render_probe_md(stats):
    """Render the probe section as markdown."""
    lines = ['### 探查', '']
    lines.append('- 行数估值(pg_class.reltuples):{:,}'.format(stats['reltuples']))
    pk_cols = stats['pk_cols']
    if not pk_cols:
        pk_desc = '无(DataX channel 无法并行)'
    elif len(pk_cols) > 1:
        pk_desc = '复合 ({}) (DataX splitPk 不支持复合,退串行)'.format(
            ', '.join('`{}`'.format(c) for c in pk_cols))
    elif stats['pk_auto_increment']:
        pk_desc = '`{}`(自增)'.format(pk_cols[0])
    else:
        pk_desc = '`{}`(非自增,DataX channel 切分分布可能不均)'.format(pk_cols[0])
    lines.append('- 主键:' + pk_desc)
    lines.append('- 锚点字段:')
    if not stats['create_exists']:
        lines.append(' - `create_time`:缺失')
    elif stats['create_earliest']:
        lines.append(' - `create_time`:存在;按主键序范围 {} ~ {}'.format(
            stats['create_earliest'], stats['create_latest']))
    else:
        lines.append(' - `create_time`:存在')
    if not stats['update_exists']:
        lines.append(' - `update_time`:缺失')
    else:
        parts = ['`update_time`:存在']
        if stats['update_earliest_timeout']:
            parts.append('最早非空(查询超时)')
        elif stats['update_earliest'] is not None:
            parts.append('最早非空 {}'.format(stats['update_earliest']))
        else:
            parts.append('最早非空(全 NULL)')
        if stats['recent_total'] is not None and stats['recent_total'] > 0:
            rpct = 100.0 * stats['recent_update_notnull'] / stats['recent_total']
            parts.append('近期 {} 行非空率 {:.1f}%'.format(
                stats['recent_total'], rpct))
        lines.append(' - ' + ';'.join(parts))
    if stats['del_candidates']:
        lines.append('- 软删字段(含 `del` 子串):' + ', '.join(
            '`{}`'.format(c) for c in stats['del_candidates']))
    else:
        lines.append('- 软删字段(含 `del` 子串):未命中')
    return '\n'.join(lines) + '\n'


def _resolve_to_project_root(path):
    """Resolve a relative path against the project root; return absolute paths as-is.

    Reuses the logic of dw_base.datax.entry._resolve_relative_to_base, so relative
    resources such as the mask conf are found no matter which cwd the script is run
    from, consistent with the other bin entry points (datax-hive-import-starter etc.).
    """
    if os.path.isabs(path):
        return path
    return os.path.join(project_root, path)


def load_mask_conf(path):
    """Read the mask config ini and return a {field: method} dict.

    Format (same as jobs/raw/{domain}/{table}.mask.ini):
        [mask]
        field1 = method1
        field2 = method2
    method ∈ trim / md5 / month_trunc / mask_middle / keep_first_n / keep_last_n
    - trim: the column does not enter raw at all (excluded from the reader column list)
    - others: the column enters raw, masked on the reader side by dw_base.datax.mask
    A missing file raises FileNotFoundError (no silent fallback).
    """
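    # Illustrative conf (field names are made up):
    #     [mask]
    #     id_card = trim
    #     phone = mask_middle
    # -> returns {'id_card': 'trim', 'phone': 'mask_middle'}: 'id_card' is dropped
    #    from the reader column list, 'phone' is rendered into the [mask] section.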
    if not os.path.isfile(path):
        raise FileNotFoundError('mask 配置不存在: ' + path)
    cp = ConfigParser()
    cp.read(path, encoding='utf-8')
    if not cp.has_section('mask'):
        return {}
    return dict(cp.items('mask'))


def render_schema_md(rows, mask_dict=None):
    """Emit a markdown table: No. / column / Chinese name / data type / PK flag / mask type.

    When mask_dict is omitted the mask-type column stays blank; when given, it is
    filled with each field's method (including trim).
    """
    lines = [
        '| 序号 | 字段名 | 中文名 | 数据类型 | 主键标识 | 脱敏类型 |',
        '| --- | --- | --- | --- | --- | --- |',
    ]
    methods = mask_dict or {}
    for num, name, comment, typ, pk in rows:
        method = methods.get(name, '')
        lines.append('| {} | `{}` | {} | {} | {} | {} |'.format(
            num, name, comment or '', typ, pk, method))
    return '\n'.join(lines) + '\n'


def render_template(ds_ref, database, schema, table, columns, pk, mask_methods=None):
    """Render the sync ini template.

    columns: [(name, comment), ...] with trim columns already removed, original PG order kept
    mask_methods: {field: method} holding only non-trim methods (mask_middle / month_trunc
        etc.), rendered into the [mask] section; with an empty dict or None the section
        is omitted
    """
    column_str = ','.join(c for c, _ in columns)
    today = datetime.now().strftime('%Y-%m-%d')
    if mask_methods:
        mask_lines = '\n'.join('{} = {}'.format(f, m) for f, m in mask_methods.items())
        mask_section = '[mask]\n' + mask_lines + '\n\n'
    else:
        mask_section = ''
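    # Note: the doubled braces below ('{{' / '}}') escape str.format and come out as
    # literal ${...} placeholders (e.g. ${start_date}, ${dt}) in the rendered ini;
    # they are not substituted here.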
    return (
        '; 作者:<TODO>\n'
        '; 日期:{today}\n'
        '; 工单:<TODO>\n'
        '; 目的:PG {database}.{schema}.{table} → Hive raw.<TODO> 同步模板\n'
        '; 状态:[待执行]\n'
        '; 备注:自动生成的全字段参考模板。开发者按需裁剪字段 / 改 where / 加 mask 段 /\n'
        '; 调 splitPk / 改 writer.path 表名后缀(_inc_d / _his_o 等)\n'
        ';\n'
        '; 配套 DDL:manual/ddl/raw/<TODO_domain>/raw_<TODO>_create.sql\n'
        '\n'
        '[reader]\n'
        'dataSource = {ds_ref}\n'
        'database = {database}\n'
        'table = {schema}.{table}\n'
        'column = {column_str}\n'
        'columnType =\n'
        "where = update_time >= '${{start_date}}' AND update_time < '${{stop_date}}'\n"
        'querySql =\n'
        'splitPk = {pk}\n'
        'fetchSize = 1000\n'
        '\n'
        '{mask_section}'
        '[writer]\n'
        'dataSource = hdfs/<TODO>\n'
        'path = /user/hive/warehouse/raw.db/{table}_TODO_d/dt=${{dt}}/\n'
        'column = {column_str}\n'
        'columnType =\n'
        'fileType = orc\n'
        'fileName = {table}_TODO_d\n'
        'encoding = UTF-8\n'
        'writeMode = truncate\n'
        'fieldDelimiter = \\t\n'
    ).format(
        today=today, ds_ref=ds_ref, database=database, schema=schema,
        table=table, column_str=column_str, pk=pk, mask_section=mask_section,
    )


def main():
    parser = argparse.ArgumentParser(
        prog='datax-sync-template-gen',
        description='PG → HDFS DataX sync ini 模板生成器(全字段参考模板)',
    )
    parser.add_argument('-ds', required=True, metavar='DS_REF',
                        help='数据源 ref,形如 postgresql/prod-hobby(同 sync ini dataSource 字段)')
    parser.add_argument('-t', required=True, metavar='SCHEMA.TABLE',
                        help='schema 限定的表名(如 public.card_group_order_info)')
    parser.add_argument('-o', nargs='?', const=WORKSPACE_DEFAULT, default=None, metavar='DIR',
                        help='输出目录(任意三态 stdout 始终打印 md + ini;不传仅 stdout;不带值额外落盘 workspace/{yyyymmdd}/;带值额外落盘 <DIR>/)')
    parser.add_argument('-mask-conf', default=None, metavar='PATH', dest='mask_conf',
                        help='mask 配置 ini 路径({table}.mask.ini)。传入时按配置剔除 trim 字段 + 渲染 [mask] 段,md 脱敏类型列填好;不传时全字段输出,md 脱敏类型列空白')
    args = parser.parse_args()
    if '.' not in args.t:
        print('-t 必须 schema.table 格式,收到: ' + args.t, file=sys.stderr)
        sys.exit(2)
    schema, table = args.t.split('.', 1)
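    # e.g. -t public.card_group_order_info -> schema='public', table='card_group_order_info'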
    ds = resolve_datasource(args.ds)
    ds_dict = ds.parse()
    jdbc_url = ds_dict[DS_POSTGRE_SQL_JDBC_URL]
    user = ds_dict['username']
    password = ds_dict['password']
    host, port, database = parse_jdbc_url(jdbc_url)
    import pg8000.dbapi
    conn = pg8000.dbapi.connect(
        host=host, port=port, database=database,
        user=user, password=password,
    )
    try:
        full_rows = query_columns_full(conn, schema, table)
        if not full_rows:
            raise ValueError('表不存在或无字段: {}.{}'.format(schema, table))
        probe_stats = probe_table(conn, schema, table, full_rows)
    finally:
        conn.close()
    # full_rows: [(attnum, attname, comment, pg_type, pk_flag), ...]
    if args.mask_conf:
        mask_path = _resolve_to_project_root(args.mask_conf)
        mask_dict = load_mask_conf(mask_path)
    else:
        mask_dict = {}
    # Warn on stderr (without aborting) when the mask conf names fields the table does not have.
    pg_field_set = {r[1] for r in full_rows}
    unknown_fields = [f for f in mask_dict if f not in pg_field_set]
    if unknown_fields:
        print('警告:mask 配置含表中不存在字段(已忽略): ' + ', '.join(unknown_fields),
              file=sys.stderr)
    trim_set = {f for f, m in mask_dict.items() if m == 'trim'}
    non_trim_mask = {f: m for f, m in mask_dict.items() if m != 'trim'}
    # Column list with trim fields removed, keeping the original PG order (attnum ascending).
    columns = [(r[1], r[2] or '') for r in full_rows if r[1] not in trim_set]
    pk_names = [r[1] for r in full_rows if r[4] == 'PK']
    pk = pk_names[0] if len(pk_names) == 1 and pk_names[0] not in trim_set else ''
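    # splitPk is filled only when there is exactly one PK column and it is not trimmed;
    # otherwise it stays blank and DataX falls back to a single (serial) channel.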
    probe_md = render_probe_md(probe_stats)
    schema_md = render_schema_md(full_rows, mask_dict)
    md_content = probe_md + '\n### 字段\n\n' + schema_md
    ini_content = render_template(args.ds, database, schema, table, columns, pk, non_trim_mask)
    # Always print to stdout (md first, then the ini template); -o additionally writes files.
    sys.stdout.write(md_content)
    sys.stdout.write('\n')
    sys.stdout.write(ini_content)
    if args.o is not None:
        os.makedirs(args.o, exist_ok=True)
        md_path = os.path.join(args.o, table + '.md')
        ini_path = os.path.join(args.o, table + '.ini')
        with open(md_path, 'w', encoding='utf-8') as f:
            f.write(md_content)
        with open(ini_path, 'w', encoding='utf-8') as f:
            f.write(ini_content)
        print('已写入: ' + md_path, file=sys.stderr)
        print('已写入: ' + ini_path, file=sys.stderr)


if __name__ == '__main__':
    main()