# -*- coding: utf-8 -*- # Author : Charley # Python : 3.8.10 # Date: 2024-10-14 11:01 import time import requests import schedule import user_agent import concurrent.futures from loguru import logger from parsel import Selector from datetime import datetime from mysql_pool import MySQLConnectionPool from tenacity import retry, stop_after_attempt, wait_fixed logger.remove() logger.add("logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00", format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}", level="DEBUG", retention="7 day") def after_log(retry_state): """ retry 回调 :param retry_state: RetryCallState 对象 """ # 检查 args 是否存在且不为空 if retry_state.args and len(retry_state.args) > 0: log = retry_state.args[0] # 获取传入的 logger else: log = logger # 使用全局 logger if retry_state.outcome.failed: log.warning( f"Function '{retry_state.fn.__name__}', Attempt {retry_state.attempt_number} Times") else: log.info(f"Function '{retry_state.fn.__name__}', Attempt {retry_state.attempt_number} succeeded") @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log) def get_proxys(log): """ 获取代理 :return: 代理 """ tunnel = "x371.kdltps.com:15818" kdl_username = "t13753103189895" kdl_password = "o0yefv6z" try: proxies = { "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}, "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel} } return proxies except Exception as e: log.error(f"Error getting proxy: {e}") raise e # @retry(stop=stop_after_attempt(5), wait=wait_fixed(2), after=after_log) # def get_proxys_(log): # # 已购买账户 北美 # # http_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36927" # # https_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36927" # http_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36928" # https_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36928" # # # url = "https://ifconfig.me" # try: # proxySettings = { # "http": http_proxy, # "https": https_proxy, # } # return proxySettings # except Exception as e: # log.error(f"Error getting proxy: {e}") # raise e @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log) def get_price(log, ucid): # ucid = '204224' headers = { "accept": "application/json, text/plain, */*", "referer": "https://robograding.com/feed/00205223/view", "user-agent": user_agent.generate_user_agent() # "x-xsrf-token": "eyJpdiI6ImVBTksreFRBejJXS0tRWmQvUERjRVE9PSIsInZhbHVlIjoiWXA0VktNMGFpQ0RydkR0SmZrVDNhU0ZCQTU3T0pMcExIUFI0VkFkSm85NURGeEdNUXJKU1hxdlIwOHJkbXJGdVcrMXlpVlJPUDRUR3dBV0ZaQ0d2bFhHeTQzaG40dDFMb1lBSVpmcTF6cDh6UHVwTEZEZFViYXo1RnVjQ0dCQ2siLCJtYWMiOiJjYTdkNDJhMDkyNmE3ZmVjZmRiMWY2ZTBjMDBhNzJjMjhkZWYwY2M4NjAwNDAxMTU0ZmI5YjE4NWZhNTVhNWM3IiwidGFnIjoiIn0=" } url = f"https://robograding.com/api/v3/card-price/{ucid}" response = requests.get(url, headers=headers, proxies=get_proxys(), timeout=10) # log.debug(response.text) # response = requests.get(url, headers=headers) if response.status_code != 200: log.debug('请求失败,重试......................') raise Exception('请求失败,重试......................') price = response.json().get('price') return price def transform_date(log, date_str): """ November 13, 2020 类型的日期字符串 转换成年月日格式 :param log: logger :param date_str: 日期字符串 :return: formatted_date """ try: # 解析日期字符串 date_obj = datetime.strptime(date_str, "%B %d, %Y") # 格式化日期 formatted_date = date_obj.strftime("%Y-%m-%d") return formatted_date except Exception as e: log.error(f"Error transforming date: {e}") return None def parse_data(log, cert_id, resp_text, sql_pool): selector = Selector(text=resp_text) tag_div1 = selector.xpath('//main/section/div[1]') tag_div2 = selector.xpath('//main/section/div[2]') name = tag_div2.xpath('//div/h3/text()').get() # title = tag_div2.xpath('//div[@class="feed-view__header__content"]//h2/text()').get() # score = tag_div2.xpath('/div[1]/div/div/div/span[2]/text()').get() score = tag_div2.xpath('./div[1]//span[2]/text()').get() # print(score) tr_list = tag_div2.xpath('./div[@class="w-full pt-4"]/div') ags_set = tr_list.xpath('./div[1]/span[2]/text()').get() card_year = tr_list.xpath('./div[2]/span[2]/text()').get() card_type = tr_list.xpath('./div[3]/span[2]/text()').get() series = tr_list.xpath('./div[4]/span[2]/text()').get() released = tr_list.xpath('./div[5]/span[2]/text()').get() # 转换 owner = tr_list.xpath('./div[6]/span[2]/text()').get() # overall centering_overall = tag_div2.xpath( './div[2]/div/div[1]/div/span/text()').get() surface_overall = tag_div2.xpath( './div[2]/div/div[2]/div/span/text()').get() edges_overall = tag_div2.xpath( './div[2]/div/div[3]/div/span/text()').get() corners_overall = tag_div2.xpath( './div[2]/div/div[4]/div/span/text()').get() front_img = tag_div1.xpath('.//img/@src').get() release_date = transform_date(log, released) if released else None data_dict = { "cert_id": cert_id, "name": name, "score": score, "card_year":card_year, "card_type": card_type, "release_date": release_date, "series": series, "ags_set": ags_set, "owner": owner, "centering_overall": centering_overall, "surface_overall": surface_overall, "edges_overall": edges_overall, "corners_overall": corners_overall, "front_img": front_img, } # print(data_dict) # 保存数据到数据库 sql_pool.insert_one_or_dict(table="ags_record", data=data_dict) @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log) def get_data(log, ags_id_list, sql_pool): sql_id = ags_id_list[0] cert_id = ags_id_list[1] log.debug(f"开始处理 {cert_id}") headers = { "user-agent": user_agent.generate_user_agent() } url = f"https://agscard.com/feed/{cert_id}/view" try: response = requests.get(url, headers=headers, proxies=get_proxys(log), timeout=10) # response = requests.get(url, headers=headers) response.raise_for_status() # print(response.text) if "Grades are not available yet" in response.text: log.debug("Grades are not available yet in response.text......................") # 更新数据库状态为未完成 sql_pool.update_one("UPDATE ags_task SET state=2 WHERE id=%s", (sql_id,)) else: parse_data(log, cert_id, response.text, sql_pool) # 更新数据库状态为已完成 sql_pool.update_one("UPDATE ags_task SET state=1 WHERE id=%s", (sql_id,)) except requests.RequestException as e: log.error(f"Request error: {e}") raise def process_urls(log, ids, mysql_pool, batch_size=1000, max_workers=5): with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: for i in range(0, len(ids), batch_size): batch = ids[i:i + batch_size] try: futures_to_urls = {executor.submit(get_data, log, url, mysql_pool): url for url in batch} for future in concurrent.futures.as_completed(futures_to_urls): url = futures_to_urls[future] try: future.result() log.debug(f"处理 {url} 成功") except Exception as exc: log.debug(f"处理 {url} 出错: {exc}") except Exception as e: log.error(f"提交任务失败: {e}") def get_new_task(sql_pool): # 查询最后一条数据 并且 +2000 max_cert = sql_pool.select_one("SELECT MAX(cert_id) FROM ags_record LIMIT 1") end_max_cert_num = int(max_cert[0]) + 2000 end_max_cert_str = f"{end_max_cert_num:08}" # 格式化为 8 位,不足左边补 0 # logger.debug(f'查询到最新的 id 为:{end_max_cert_str[0]}, 开始生成新数据, 并添加到任务表中.........') # 查询新任务列表 """ 每日更新任务为+2000,-1000 """ ags_id_list = sql_pool.select_all( f"SELECT id, cert_id FROM ags_task WHERE state != 1 AND cert_id <= '{end_max_cert_str}' ORDER BY id DESC LIMIT 6000") # 3000别忘了!!!!!!!!!!!!!!!!!! # ags_id_list = sql_pool.select_all("SELECT id,cert_id FROM ags_task WHERE id < 927059 AND state = 0 LIMIT 10000") ags_id_list = [i for i in ags_id_list] return ags_id_list @retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log) def main(log): """ 爬虫主函数 """ try: log.info("开始运行 tag_spider 爬虫任务............................................................") sql_pool = MySQLConnectionPool(log=log) if not sql_pool: log.error("数据库连接失败") raise Exception("数据库连接失败") # while True: new_task = get_new_task(sql_pool) if not new_task: log.debug(".............................. 没有新任务,结束本轮任务 ..............................") # break return try: process_urls(log, new_task, sql_pool, batch_size=1000, max_workers=5) except Exception as e: log.error('process urls: ', e) except Exception as e: log.error(f'error:{e}') finally: log.info("爬虫程序运行结束,等待下一轮的采集任务.............") def schedule_task(): """ 爬虫模块 定时任务 的启动文件 """ # 立即运行一次任务 main(logger) # 设置定时任务 schedule.every().day.at("00:01").do(main, log=logger) while True: schedule.run_pending() time.sleep(1) if __name__ == '__main__': schedule_task() # main(logger) # print(get_price('710785')) # get_data(logger, (1061124, '00714066'), None )