ags_new_daily.py

# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.8.10
# Date: 2024-10-14 11:01
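"""
Daily AGS card crawler: fetches agscard.com feed pages for queued cert ids,
parses the grade details with parsel, stores them in the ags_record table,
and marks each row in ags_task as processed. Runs once at startup and then
daily at 00:01 via schedule.
"""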
import time
import requests
import schedule
import user_agent
import concurrent.futures
from loguru import logger
from parsel import Selector
from datetime import datetime
from mysql_pool import MySQLConnectionPool
from tenacity import retry, stop_after_attempt, wait_fixed

logger.remove()
logger.add("logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
           level="DEBUG", retention="7 days")


def after_log(retry_state):
    """
    Retry callback.
    :param retry_state: RetryCallState object
    """
    # Use the logger passed as the first positional argument, if any
    if retry_state.args and len(retry_state.args) > 0:
        log = retry_state.args[0]
    else:
        log = logger  # fall back to the global logger
    if retry_state.outcome.failed:
        log.warning(
            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
    else:
        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_proxys(log):
    """
    Get the tunnel proxy settings.
    :return: proxies dict for requests
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise e


# @retry(stop=stop_after_attempt(5), wait=wait_fixed(2), after=after_log)
# def get_proxys_(log):
#     # Purchased account, North America
#     # http_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36927"
#     # https_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36927"
#     http_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36928"
#     https_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36928"
#
#     # url = "https://ifconfig.me"
#     try:
#         proxySettings = {
#             "http": http_proxy,
#             "https": https_proxy,
#         }
#         return proxySettings
#     except Exception as e:
#         log.error(f"Error getting proxy: {e}")
#         raise e


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_price(log, ucid):
    # ucid = '204224'
    headers = {
        "accept": "application/json, text/plain, */*",
        "referer": "https://robograding.com/feed/00205223/view",
        "user-agent": user_agent.generate_user_agent()
        # "x-xsrf-token": "eyJpdiI6ImVBTksreFRBejJXS0tRWmQvUERjRVE9PSIsInZhbHVlIjoiWXA0VktNMGFpQ0RydkR0SmZrVDNhU0ZCQTU3T0pMcExIUFI0VkFkSm85NURGeEdNUXJKU1hxdlIwOHJkbXJGdVcrMXlpVlJPUDRUR3dBV0ZaQ0d2bFhHeTQzaG40dDFMb1lBSVpmcTF6cDh6UHVwTEZEZFViYXo1RnVjQ0dCQ2siLCJtYWMiOiJjYTdkNDJhMDkyNmE3ZmVjZmRiMWY2ZTBjMDBhNzJjMjhkZWYwY2M4NjAwNDAxMTU0ZmI5YjE4NWZhNTVhNWM3IiwidGFnIjoiIn0="
    }
    url = f"https://robograding.com/api/v3/card-price/{ucid}"
    # get_proxys expects the logger as its first argument
    response = requests.get(url, headers=headers, proxies=get_proxys(log), timeout=10)
    # log.debug(response.text)
    # response = requests.get(url, headers=headers)
    if response.status_code != 200:
        log.debug('Request failed, retrying......................')
        raise Exception('Request failed, retrying......................')
    price = response.json().get('price')
    return price


def transform_date(log, date_str):
    """
    Convert a date string like "November 13, 2020" into YYYY-MM-DD format.
    :param log: logger
    :param date_str: date string
    :return: formatted_date
    """
    try:
        # Parse the date string
        date_obj = datetime.strptime(date_str, "%B %d, %Y")
        # Format the date
        formatted_date = date_obj.strftime("%Y-%m-%d")
        return formatted_date
    except Exception as e:
        log.error(f"Error transforming date: {e}")
        return None
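
# Example (illustrative): transform_date(logger, "November 13, 2020") returns "2020-11-13";
# an unparseable string is logged and returns None.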


def parse_data(log, cert_id, resp_text, sql_pool):
    selector = Selector(text=resp_text)
    tag_div1 = selector.xpath('//main/section/div[1]')
    tag_div2 = selector.xpath('//main/section/div[2]')
    name = tag_div2.xpath('//div/h3/text()').get()
    # title = tag_div2.xpath('//div[@class="feed-view__header__content"]//h2/text()').get()
    # score = tag_div2.xpath('/div[1]/div/div/div/span[2]/text()').get()
    score = tag_div2.xpath('./div[1]//span[2]/text()').get()
    # print(score)
    tr_list = tag_div2.xpath('./div[@class="w-full pt-4"]/div')
    ags_set = tr_list.xpath('./div[1]/span[2]/text()').get()
    card_year = tr_list.xpath('./div[2]/span[2]/text()').get()
    card_type = tr_list.xpath('./div[3]/span[2]/text()').get()
    series = tr_list.xpath('./div[4]/span[2]/text()').get()
    released = tr_list.xpath('./div[5]/span[2]/text()').get()  # needs date conversion
    owner = tr_list.xpath('./div[6]/span[2]/text()').get()
    # overall grades
    centering_overall = tag_div2.xpath(
        './div[2]/div/div[1]/div/span/text()').get()
    surface_overall = tag_div2.xpath(
        './div[2]/div/div[2]/div/span/text()').get()
    edges_overall = tag_div2.xpath(
        './div[2]/div/div[3]/div/span/text()').get()
    corners_overall = tag_div2.xpath(
        './div[2]/div/div[4]/div/span/text()').get()
    front_img = tag_div1.xpath('.//img/@src').get()
    release_date = transform_date(log, released) if released else None
    data_dict = {
        "cert_id": cert_id,
        "name": name,
        "score": score,
        "card_year": card_year,
        "card_type": card_type,
        "release_date": release_date,
        "series": series,
        "ags_set": ags_set,
        "owner": owner,
        "centering_overall": centering_overall,
        "surface_overall": surface_overall,
        "edges_overall": edges_overall,
        "corners_overall": corners_overall,
        "front_img": front_img,
    }
    # print(data_dict)
    # Save the data to the database
    sql_pool.insert_one_or_dict(table="ags_record", data=data_dict)


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_data(log, ags_id_list, sql_pool):
    sql_id = ags_id_list[0]
    cert_id = ags_id_list[1]
    log.debug(f"Start processing {cert_id}")
    headers = {
        "user-agent": user_agent.generate_user_agent()
    }
    url = f"https://agscard.com/feed/{cert_id}/view"
    try:
        response = requests.get(url, headers=headers, proxies=get_proxys(log), timeout=10)
        # response = requests.get(url, headers=headers)
        response.raise_for_status()
        # print(response.text)
        if "Grades are not available yet" in response.text:
            log.debug("Grades are not available yet in response.text......................")
            # Update the task state to "not yet available"
            sql_pool.update_one("UPDATE ags_task SET state=2 WHERE id=%s", (sql_id,))
        else:
            parse_data(log, cert_id, response.text, sql_pool)
            # Update the task state to "completed"
            sql_pool.update_one("UPDATE ags_task SET state=1 WHERE id=%s", (sql_id,))
    except requests.RequestException as e:
        log.error(f"Request error: {e}")
        raise


def process_urls(log, ids, mysql_pool, batch_size=1000, max_workers=5):
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        for i in range(0, len(ids), batch_size):
            batch = ids[i:i + batch_size]
            try:
                futures_to_urls = {executor.submit(get_data, log, url, mysql_pool): url for url in batch}
                for future in concurrent.futures.as_completed(futures_to_urls):
                    url = futures_to_urls[future]
                    try:
                        future.result()
                        log.debug(f"Processed {url} successfully")
                    except Exception as exc:
                        log.debug(f"Error processing {url}: {exc}")
            except Exception as e:
                log.error(f"Failed to submit tasks: {e}")


def get_new_task(sql_pool):
    # Query the latest record and add 2000 to it
    max_cert = sql_pool.select_one("SELECT MAX(cert_id) FROM ags_record LIMIT 1")
    end_max_cert_num = int(max_cert[0]) + 2000
    end_max_cert_str = f"{end_max_cert_num:08}"  # zero-pad to 8 digits
    # logger.debug(f'Latest id found: {end_max_cert_str[0]}, generating new data and adding it to the task table.........')
    # Query the list of new tasks
    """
    Daily update task: +2000, -1000
    """
    ags_id_list = sql_pool.select_all(
        f"SELECT id, cert_id FROM ags_task WHERE state != 1 AND cert_id <= '{end_max_cert_str}' ORDER BY id DESC LIMIT 6000")  # don't forget 3000!!!!!!!!!!!!!!!!!!
    # ags_id_list = sql_pool.select_all("SELECT id,cert_id FROM ags_task WHERE id < 927059 AND state = 0 LIMIT 10000")
    ags_id_list = [i for i in ags_id_list]
    return ags_id_list


@retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log)
def main(log):
    """
    Main crawler entry point.
    """
    try:
        log.info("Starting the tag_spider crawler task............................................................")
        sql_pool = MySQLConnectionPool(log=log)
        if not sql_pool:
            log.error("Database connection failed")
            raise Exception("Database connection failed")
        # while True:
        new_task = get_new_task(sql_pool)
        if not new_task:
            log.debug(".............................. No new tasks, ending this round ..............................")
            # break
            return
        try:
            process_urls(log, new_task, sql_pool, batch_size=1000, max_workers=5)
        except Exception as e:
            log.error(f'process urls: {e}')
    except Exception as e:
        log.error(f'error: {e}')
    finally:
        log.info("Crawler run finished, waiting for the next round of collection.............")


def schedule_task():
    """
    Entry point that schedules the crawler task.
    """
    # Run the task once immediately
    main(logger)
    # Then schedule it to run daily
    schedule.every().day.at("00:01").do(main, log=logger)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    schedule_task()
    # main(logger)
    # print(get_price('710785'))
    # get_data(logger, (1061124, '00714066'), None )
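
# Note: mysql_pool is a local module not shown here. Based on the calls above, this
# script assumes MySQLConnectionPool exposes roughly the following interface (a sketch
# of the assumed API, not the actual implementation):
#   MySQLConnectionPool(log=...)             -> pooled connection wrapper
#   select_one(sql)                          -> a single row (tuple)
#   select_all(sql)                          -> all rows (iterable of tuples)
#   update_one(sql, params)                  -> execute an UPDATE with parameters
#   insert_one_or_dict(table=..., data={...}) -> INSERT a dict-shaped row into `table`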