ags_pop_spider.py

# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.10.8
# Date : 2025/11/6 10:48
import inspect
import time

import requests
import schedule
import user_agent
from loguru import logger
from parsel import Selector
from mysql_pool import MySQLConnectionPool
from tenacity import retry, stop_after_attempt, wait_fixed

logger.remove()
logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
           level="DEBUG", retention="7 days")
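# rotation="00:00" starts a fresh log file at midnight each day, and
# retention="7 days" deletes files older than a week (standard loguru behavior).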


def after_log(retry_state):
    """
    Retry callback.
    :param retry_state: tenacity RetryCallState object
    """
    # Prefer the logger passed as the wrapped function's first positional argument
    if retry_state.args:
        log = retry_state.args[0]
    else:
        log = logger  # fall back to the global logger
    if retry_state.outcome.failed:
        log.warning(f"Function '{retry_state.fn.__name__}' failed on attempt {retry_state.attempt_number}")
    else:
        log.info(f"Function '{retry_state.fn.__name__}' succeeded on attempt {retry_state.attempt_number}")
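
# Illustrative only: after_log assumes every retried function takes the logger
# as its first positional argument, so the callback can recover it from
# retry_state.args[0]. A hypothetical call site would look like:
#
#     @retry(stop=stop_after_attempt(3), wait=wait_fixed(1), after=after_log)
#     def fetch_page(log, url):
#         log.debug(f"fetching {url}")
#         return requests.get(url, timeout=10)
#
#     fetch_page(logger, "https://example.com")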


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_proxys(log):
    """
    Build the proxy configuration.
    :param log: logger object
    :return: proxies dict for requests
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": f"http://{kdl_username}:{kdl_password}@{tunnel}/",
            "https": f"http://{kdl_username}:{kdl_password}@{tunnel}/"
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise
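
# Note: get_proxys() is never called in this module; the requests below go out
# directly. A minimal sketch of how the tunnel proxy could be wired in
# (hypothetical call site, not part of the original flow):
#
#     proxies = get_proxys(logger)
#     response = requests.get(url, headers=headers, proxies=proxies, timeout=10)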


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_detail_data(log, sql_pool, category_name, category_link, crawl_date):
    """
    Fetch the POP-report detail for one category.
    :param log: logger object
    :param sql_pool: MySQLConnectionPool object
    :param category_name: category name
    :param category_link: category link
    :param crawl_date: crawl date
    """
    log.debug(f"Start fetching POP-report detail for {category_name}, link: {category_link}")
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "referer": "https://agscard.com/pop",
        "user-agent": user_agent.generate_user_agent()
    }
    response = requests.get(category_link, headers=headers, timeout=10)
    response.raise_for_status()
    selector = Selector(text=response.text)
    tag_h2_list = selector.xpath('//h2[@class="pop-hero__text-subheading pop-hero__text-stats"]')
    # Each stat sits in a nested span; keep the value text and strip thousands separators
    category_sets = tag_h2_list.xpath('./span[1]/span[1]/text()').get()
    category_sets = category_sets.strip().replace(",", "") if category_sets else None
    category_cards = tag_h2_list.xpath('./span[2]/span[1]/text()').get()
    category_cards = category_cards.strip().replace(",", "") if category_cards else None
    category_graded = tag_h2_list.xpath('./span[3]/span[1]/text()').get()
    category_graded = category_graded.strip().replace(",", "") if category_graded else None
    data_dict = {
        'category_name': category_name,
        'category_link': category_link,
        'category_sets': category_sets,
        'category_cards': category_cards,
        'category_graded': category_graded,
        'crawl_date': crawl_date
    }
    # Save the row, skipping duplicates
    sql_pool.insert_one_or_dict(table="ags_pop_record", data=data_dict, ignore=True)
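
# Assumed shape of the target table (hypothetical DDL, inferred from data_dict
# above; the real schema belongs to MySQLConnectionPool's setup and may differ):
#
#     CREATE TABLE IF NOT EXISTS ags_pop_record (
#         id              BIGINT AUTO_INCREMENT PRIMARY KEY,
#         category_name   VARCHAR(255),
#         category_link   VARCHAR(512),
#         category_sets   VARCHAR(32),
#         category_cards  VARCHAR(32),
#         category_graded VARCHAR(32),
#         crawl_date      DATE,
#         UNIQUE KEY uniq_category_date (category_name, crawl_date)
#     );
#
# With a unique key like this, ignore=True (presumably INSERT IGNORE) makes
# re-runs on the same crawl_date idempotent.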


@retry(stop=stop_after_attempt(5), wait=wait_fixed(2), after=after_log)
def get_ags_pop_list(log, sql_pool):
    """
    Fetch the POP-report category list.
    :param log: logger object
    :param sql_pool: MySQLConnectionPool object
    """
    crawl_date = time.strftime("%Y-%m-%d", time.localtime())
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "user-agent": user_agent.generate_user_agent()
    }
    url = "https://agscard.com/pop"
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    selector = Selector(text=response.text)
    tag_a_list = selector.xpath('//div[@class="row"]/a')
    for tag_a in tag_a_list:
        category_name = tag_a.xpath('./div[1]/img/@alt').get()
        category_link = tag_a.xpath('./@href').get()
        # The listing cards also show sets/cards/graded counts, but the detail
        # page is used as the source of truth instead:
        # category_sets = tag_a.xpath('./div[2]/div[1]/div[1]/text()').get()
        # category_cards = tag_a.xpath('./div[2]/div[2]/div[1]/text()').get()
        # category_graded = tag_a.xpath('./div[2]/div[3]/div[1]/text()').get()
        get_detail_data(log, sql_pool, category_name, category_link, crawl_date)
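
# The listing XPath above implies markup roughly like this (reconstructed from
# the selectors, abridged; the live page may differ):
#
#     <div class="row">
#       <a href="https://agscard.com/pop/...">
#         <div><img alt="Category Name" src="..."></div>
#         <div> ... sets / cards / graded counts ... </div>
#       </a>
#     </div>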


@retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log)
def ags_pop_main(log):
    """
    Main function; on an unhandled failure, retry hourly, up to 100 times.
    :param log: logger object
    """
    log.info(
        f'Starting crawler task {inspect.currentframe().f_code.co_name}....................................................')
    # Configure the MySQL connection pool
    sql_pool = MySQLConnectionPool(log=log)
    if not sql_pool.check_pool_health():
        log.error("Database connection pool is unhealthy")
        raise RuntimeError("Database connection pool is unhealthy")
    try:
        # Fetch the card data for every category
        get_ags_pop_list(log, sql_pool)
    except Exception as e:
        log.error(f'{inspect.currentframe().f_code.co_name} error: {e}')
    finally:
        log.info(f'Crawler {inspect.currentframe().f_code.co_name} finished; waiting for the next collection round............')


def schedule_task():
    """
    Entry point for the crawler module's scheduled task.
    """
    # Run the task once immediately
    ags_pop_main(log=logger)
    # Then schedule it to run every day at 00:01
    schedule.every().day.at("00:01").do(ags_pop_main, log=logger)
    while True:
        schedule.run_pending()
        time.sleep(1)
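
# schedule.every().day.at("00:01") fires in the machine's local time by
# default; the one-second sleep keeps the polling loop cheap while staying
# responsive to the next pending job.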


if __name__ == '__main__':
    schedule_task()
    # ags_pop_main(log=logger)