# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.10.8
# Date : 2025/8/1 13:46
import time
import requests
import inspect
import schedule
from loguru import logger
from parsel import Selector
from tenacity import retry, stop_after_attempt, wait_fixed
from mysql_pool import MySQLConnectionPool
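
# NOTE: mysql_pool is a local module that is not shown in this file. A minimal
# sketch of the interface this script assumes, inferred only from the calls
# below (the real module may differ):
#
#     class MySQLConnectionPool:
#         def __init__(self, log): ...                # takes a loguru-style logger
#         def check_pool_health(self) -> bool: ...    # True if the pool is usable
#         def insert_many(self, table: str, data_list: list[dict]) -> None: ...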
- """
- https://store.clove.jp/jp/buying/pokemon/featured
- """
- # logger.remove()
- # logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
- # format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
- # level="DEBUG", retention="7 day")


def after_log(retry_state):
    """
    Retry callback for tenacity.
    :param retry_state: RetryCallState object
    """
    # Use the logger passed as the first positional argument, if any;
    # otherwise fall back to the global logger.
    if retry_state.args and len(retry_state.args) > 0:
        log = retry_state.args[0]
    else:
        log = logger
    if retry_state.outcome.failed:
        log.warning(
            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
    else:
        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_proxys(log):
    """
    Build the proxy configuration.
    :param log: logger object
    :return: proxies dict for requests
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": f"http://{kdl_username}:{kdl_password}@{tunnel}/",
            "https": f"http://{kdl_username}:{kdl_password}@{tunnel}/"
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise e
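
# The returned dict is the standard `requests` proxies mapping: both http and
# https traffic go through the same authenticated HTTP tunnel. The host and
# credentials look like a Kuaidaili (kdltps) tunnel proxy, which typically
# rotates the exit IP per request, but that is an assumption about the vendor.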


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_recycle_list(log, category, sql_pool):
    """
    Scrape the featured buying list for one category and store it.
    :param log: logger object
    :param category: category slug, e.g. "pokemon"
    :param sql_pool: MySQLConnectionPool instance
    """
    log.debug(f'{inspect.currentframe().f_code.co_name} start, category:{category}....................')
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "referer": "https://store.clove.jp/jp/buying/pokemon",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"
    }
    # url = "https://store.clove.jp/jp/buying/pokemon/featured"
    url = f"https://store.clove.jp/jp/buying/{category}/featured"
    response = requests.get(url, headers=headers, timeout=22)
    # response = requests.get(url, headers=headers, proxies=get_proxys(log), timeout=22)
    # print(response.text)
    response.raise_for_status()
    selector = Selector(text=response.text)
    tag_li_list = selector.xpath('//div[@class="w-full"]/ul/li')
    if not tag_li_list:
        log.warning(f"{inspect.currentframe().f_code.co_name}, failed to fetch the list, category:{category}..........")
        return
    info_list = []
    for tag_li in tag_li_list:
        image_url = tag_li.xpath('./div/button//img/@src').get()
        image_url = 'https://store.clove.jp/' + image_url if image_url else None
        title = tag_li.xpath('./div/div/p[1]/text()').get()
        subtitle = tag_li.xpath('./div/div/p[2]/text()').get()
        price = tag_li.xpath('./div/div/div/p[2]/text()').get()
        price = price.replace(',', '') if price else None
        data_dict = {
            "title": title,
            "subtitle": subtitle,
            "price": price,
            "image_url": image_url,
            "category": category
        }
        # print(data_dict)
        info_list.append(data_dict)
    if info_list:
        try:
            sql_pool.insert_many(table="clove_recycle_record", data_list=info_list)
        except Exception as e:
            # Exceptions are not sliceable; truncate the string form instead
            log.warning(f"{inspect.currentframe().f_code.co_name}, {str(e)[:500]}")


@retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log)
def recycle_main(log):
    """
    Main function: on failure, retries up to 100 times with a one-hour wait.
    :param log: logger object
    """
    log.info(
        f'Starting crawler task {inspect.currentframe().f_code.co_name}.................................................')
    # Configure the MySQL connection pool
    sql_pool = MySQLConnectionPool(log=log)
    if not sql_pool.check_pool_health():
        log.error("Database connection pool is unhealthy")
        raise RuntimeError("Database connection pool is unhealthy")
    try:
        category_list = ["pokemon", "onepiece", "duel-masters", "lorcana", "fab"]
        for category in category_list:
            try:
                get_recycle_list(log, category, sql_pool)
            except Exception as e2:
                log.error(f"Request get_recycle_list error: {e2}")
    except Exception as e:
        log.error(f'{inspect.currentframe().f_code.co_name} error: {e}')
    finally:
        log.info(f'Crawler {inspect.currentframe().f_code.co_name} finished; waiting for the next collection run............')


def schedule_task():
    """
    Entry point for the crawler's scheduled task.
    """
    # Run the task once immediately
    recycle_main(log=logger)
    # Then schedule it to run daily
    schedule.every().day.at("00:01").do(recycle_main, log=logger)
    while True:
        schedule.run_pending()
        time.sleep(1)
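
# schedule.run_pending() only fires jobs whose scheduled time has passed at
# the moment of the call, so the 1-second sleep keeps the polling loop cheap
# while hitting the 00:01 slot with at most ~1 second of drift.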


if __name__ == '__main__':
    # get_recycle_list(logger)
    schedule_task()