# -*- coding: utf-8 -*- # Author : Charley # Python : 3.10.8 # Date : 2025/11/26 17:59 import inspect import requests import user_agent from loguru import logger from tenacity import retry, stop_after_attempt, wait_fixed from jp_set_name_list import SET_US_NAME_LIST from mysql_pool import MySQLConnectionPool crawler_language = "tcg us" logger.remove() logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00", format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}", level="DEBUG", retention="7 day") def after_log(retry_state): """ retry 回调 :param retry_state: RetryCallState 对象 """ # 检查 args 是否存在且不为空 if retry_state.args and len(retry_state.args) > 0: log = retry_state.args[0] # 获取传入的 logger else: log = logger # 使用全局 logger if retry_state.outcome.failed: log.warning( f"Function '{retry_state.fn.__name__}', Attempt {retry_state.attempt_number} Times") else: log.info(f"Function '{retry_state.fn.__name__}', Attempt {retry_state.attempt_number} succeeded") @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log) def get_proxys(log): """ 获取代理 :return: 代理 """ tunnel = "x371.kdltps.com:15818" kdl_username = "t13753103189895" kdl_password = "o0yefv6z" try: proxies = { "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}, "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel} } return proxies except Exception as e: log.error(f"Error getting proxy: {e}") raise e @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log) def get_single_page(log, setUrlName, setName, page, sql_pool): """ 获取单页数据 :param log: logger对象 :param setUrlName: setUrlName :param setName: setName :param page: 页码 :param sql_pool: sql_pool :return: 数据列表长度 """ log.debug(f"Getting single page: {setUrlName} -> {page}") headers = { "accept": "application/json, text/plain, */*", "content-type": "application/json", "referer": "https://www.tcgplayer.com/", "user-agent": user_agent.generate_user_agent() } url = "https://mp-search-api.tcgplayer.com/v1/search/request" params = { "q": "", "isList": "false", # "mpfev": "4528" } data = { "algorithm": "sales_dismax", "from": (page - 1) * 24, "size": 24, "filters": { "term": { "productLineName": [ "pokemon" ], "productTypeName": [ "Cards" ], "setName": [ setUrlName ] }, "range": {}, "match": {} }, "listingSearch": { "context": { "cart": {}, "shippingCountry": "US" }, "filters": { "term": { "sellerStatus": "Live", "channelId": 0 }, "range": { "quantity": { "gte": 1 } }, "exclude": { "channelExclusion": 0 } } }, "context": { "cart": {}, "shippingCountry": "US", "userProfile": {} }, "settings": { "useFuzzySearch": True, "didYouMean": {} }, "sort": {} } response = requests.post(url, headers=headers, params=params, json=data) # print(response.json()) response.raise_for_status() resp_json = response.json() results = resp_json.get("results", []) if results: # # 获取set list # aggregations = results[0].get("aggregations", {}).get("setName", []) # # info_list = [] # for agg in aggregations: # set_name = agg.get("value") # url_value = agg.get("urlValue") # data_dict = { # "set_name": set_name, # "url_value": url_value # } # info_list.append(data_dict) # print(info_list) # time.sleep(11111) data_list = results[0].get("results", []) # print(len(data_list)) info_list = [] for data in data_list: card_id = data.get("productId") card_id = int(card_id) if card_id else 0 card_name = data.get("productName") pg_value = setUrlName pg_label = setName sales_date = data.get("customAttributes", {}).get("releaseDate") card_no = data.get("customAttributes", {}).get("number") rarity = data.get("rarityName") img = f'https://tcgplayer-cdn.tcgplayer.com/product/{card_id}_in_1000x1000.jpg' data_dict = { "card_id": card_id, "card_name": card_name, "img": img, "pg_value": pg_value, "pg_label": pg_label, "sales_date": sales_date, "card_no": card_no, "rarity": rarity, "crawler_language": crawler_language } # print(data_dict) info_list.append(data_dict) # 保存数据 if info_list: sql_pool.insert_many(table="pokemon_card_record", data_list=info_list, ignore=True) return len(data_list) else: log.debug(f"{setName} 第{page}页无数据") return 0 def get_list_data(log, setUrlName, setName, sql_pool): """ 获取列表数据 :param log: logger对象 :param setUrlName: setUrlName :param setName: setName :param sql_pool: sql_pool """ page = 1 max_page = 200 while page <= max_page: try: len_data_list = get_single_page(log, setUrlName, setName, page, sql_pool) except Exception as e: log.error(f"Error getting single page: {e}") len_data_list = 0 if len_data_list < 24: log.debug(f"{setName} 第{page}页数据条数:{len_data_list}, break !!!") break page += 1 @retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log) def us_pokemon_main(log): """ 主函数 """ log.info(f'开始运行 {inspect.currentframe().f_code.co_name} 爬虫任务.............................................') # 配置 MySQL 连接池 sql_pool = MySQLConnectionPool(log=log) if not sql_pool.check_pool_health(): log.error("数据库连接池异常") raise RuntimeError("数据库连接池异常") try: # 获取分类列表 log.debug(".......... 获取分类列表 ..........") for d_dict in SET_US_NAME_LIST: setUrlName = d_dict.get("url_value") setName = d_dict.get("set_name") try: log.info(f"开始获取 {setName} 数据") get_list_data(log, setUrlName, setName, sql_pool) except Exception as e: log.error(f"{inspect.currentframe().f_code.co_name} Request get_list_data error: {e}") except Exception as e: log.error(f'{inspect.currentframe().f_code.co_name} error: {e}') finally: log.info(f'爬虫程序 {inspect.currentframe().f_code.co_name} 运行结束,等待下一轮的采集任务............') if __name__ == '__main__': # get_list_data(logger, "si-start-deck-100", "SI: Start Deck 100") # get_list_data(logger, "m2-inferno-x", "M2: Inferno X") # parse_set() us_pokemon_main(logger)