clove_recycle_spider.py

# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.10.8
# Date : 2025/8/1 13:46
import time
import requests
import inspect
import schedule
from loguru import logger
from parsel import Selector
from tenacity import retry, stop_after_attempt, wait_fixed
from mysql_pool import MySQLConnectionPool

"""
Scrape the featured buyback (recycle) price lists from
https://store.clove.jp/jp/buying/pokemon/featured
and store the results in MySQL.
"""
# logger.remove()
# logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
#            format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
#            level="DEBUG", retention="7 days")


def after_log(retry_state):
    """
    tenacity retry callback: log the outcome of each attempt.
    :param retry_state: RetryCallState object
    """
    # Prefer the logger passed to the decorated function as its first
    # positional argument; fall back to the global loguru logger.
    if retry_state.args and len(retry_state.args) > 0:
        log = retry_state.args[0]
    else:
        log = logger
    if retry_state.outcome.failed:
        log.warning(
            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
    else:
        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_proxys(log):
    """
    Build the Kuaidaili tunnel-proxy configuration.
    :return: proxies dict usable by requests
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise
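
# A hedged usage sketch: route a request through the tunnel proxy.
# (The proxies= call in get_recycle_list below is currently commented out;
# this only illustrates how it would be wired up.)
#
#     resp = requests.get("https://store.clove.jp/jp/buying/pokemon/featured",
#                         proxies=get_proxys(logger), timeout=22)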


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_recycle_list(log, category, sql_pool):
    log.debug(f'{inspect.currentframe().f_code.co_name} start, category: {category}....................')
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "referer": "https://store.clove.jp/jp/buying/pokemon",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"
    }
    # url = "https://store.clove.jp/jp/buying/pokemon/featured"
    url = f"https://store.clove.jp/jp/buying/{category}/featured"
    response = requests.get(url, headers=headers, timeout=22)
    # response = requests.get(url, headers=headers, proxies=get_proxys(log), timeout=22)
    # print(response.text)
    response.raise_for_status()
    selector = Selector(text=response.text)
    tag_li_list = selector.xpath('//div[@class="w-full"]/ul/li')
    if not tag_li_list:
        log.warning(f"{inspect.currentframe().f_code.co_name}, failed to fetch the listing, category: {category}..........")
        return
    info_list = []
    for tag_li in tag_li_list:
        image_url = tag_li.xpath('./div/button//img/@src').get()
        image_url = 'https://store.clove.jp/' + image_url if image_url else None
        title = tag_li.xpath('./div/div/p[1]/text()').get()
        subtitle = tag_li.xpath('./div/div/p[2]/text()').get()
        price = tag_li.xpath('./div/div/div/p[2]/text()').get()
        price = price.replace(',', '') if price else None
        data_dict = {
            "title": title,
            "subtitle": subtitle,
            "price": price,
            "image_url": image_url,
            "category": category
        }
        # print(data_dict)
        info_list.append(data_dict)
    if info_list:
        try:
            sql_pool.insert_many(table="clove_recycle_record", data_list=info_list)
        except Exception as e:
            # Exception objects are not sliceable; truncate the string form instead
            log.warning(f"{inspect.currentframe().f_code.co_name}, {str(e)[:500]}")
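
# The XPaths above assume listing markup roughly like the sketch below,
# inferred from the selectors themselves -- the live page may differ:
#
#   <div class="w-full">
#     <ul>
#       <li>
#         <div>
#           <button>...<img src="/images/..."></button>
#           <div>
#             <p>title</p>
#             <p>subtitle</p>
#             <div><p>...</p><p>1,234</p></div>
#           </div>
#         </div>
#       </li>
#       ...
#     </ul>
#   </div>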


@retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log)
def recycle_main(log):
    """
    Main entry: scrape every category and store the results.
    :param log: logger object
    """
    log.info(
        f'Starting crawler task {inspect.currentframe().f_code.co_name}.................................................')
    # Set up the MySQL connection pool
    sql_pool = MySQLConnectionPool(log=log)
    if not sql_pool.check_pool_health():
        log.error("Database connection pool is unhealthy")
        raise RuntimeError("Database connection pool is unhealthy")
    try:
        category_list = ["pokemon", "onepiece", "duel-masters", "lorcana", "fab"]
        for category in category_list:
            try:
                get_recycle_list(log, category, sql_pool)
            except Exception as e2:
                log.error(f"Request get_recycle_list error: {e2}")
    except Exception as e:
        log.error(f'{inspect.currentframe().f_code.co_name} error: {e}')
    finally:
        log.info(f'Crawler {inspect.currentframe().f_code.co_name} finished; waiting for the next scheduled run............')


def schedule_task():
    """
    Entry point for the scheduled crawler task.
    """
    # Run the task once immediately
    recycle_main(log=logger)
    # Then schedule it to run daily at 00:01
    schedule.every().day.at("00:01").do(recycle_main, log=logger)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    # get_recycle_list(logger)
    schedule_task()
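
# mysql_pool is a local helper module. A minimal sketch of the interface this
# script relies on, assumed purely from usage here; the real module may differ:
#
#     class MySQLConnectionPool:
#         def __init__(self, log):
#             """Create the pool (connection settings live in the module)."""
#         def check_pool_health(self) -> bool:
#             """Return True if a connection can be acquired."""
#         def insert_many(self, table: str, data_list: list[dict]) -> None:
#             """Batch-insert a list of row dicts into `table`."""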