ags_new_daily.py 11 KB

# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.8.10
# Date: 2024-10-14 11:01
import json
import time
import requests
import schedule
import user_agent
import concurrent.futures
from loguru import logger
from retrying import retry
from parsel import Selector
from datetime import datetime
from mysql_pool import MySQLConnectionPool

logger.remove()
logger.add("logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
           level="DEBUG", retention="7 days")


@retry(stop_max_attempt_number=3, wait_fixed=2000)
def get_proxys_(log):
    """
    Get a proxy (Kuaidaili tunnel).
    :return: proxies dict for requests
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise e


@retry(stop_max_attempt_number=5, wait_fixed=1000)
def get_proxys():
    # Purchased account, North America
    # http_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36927"
    # https_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36927"
    http_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36928"
    https_proxy = "http://u1952150085001297:sJMHl4qc4bM0@proxy.123proxy.cn:36928"
    # url = "https://ifconfig.me"
    try:
        proxySettings = {
            "http": http_proxy,
            "https": https_proxy,
        }
        return proxySettings
    except Exception as e:
        logger.error(f"Error getting proxy: {e}")
        raise e


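# Quick sanity check (sketch, not part of the original flow): requesting the
# ifconfig.me URL commented out above through the proxies dict should return
# the proxy's exit IP rather than the local one, e.g.
#   requests.get("https://ifconfig.me", proxies=get_proxys(), timeout=10).text

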
@retry(stop_max_attempt_number=3, wait_fixed=2000)
def get_price(ucid):
    # ucid = '204224'
    headers = {
        "accept": "application/json, text/plain, */*",
        "referer": "https://robograding.com/feed/00205223/view",
        "user-agent": user_agent.generate_user_agent()
        # "x-xsrf-token": "eyJpdiI6ImVBTksreFRBejJXS0tRWmQvUERjRVE9PSIsInZhbHVlIjoiWXA0VktNMGFpQ0RydkR0SmZrVDNhU0ZCQTU3T0pMcExIUFI0VkFkSm85NURGeEdNUXJKU1hxdlIwOHJkbXJGdVcrMXlpVlJPUDRUR3dBV0ZaQ0d2bFhHeTQzaG40dDFMb1lBSVpmcTF6cDh6UHVwTEZEZFViYXo1RnVjQ0dCQ2siLCJtYWMiOiJjYTdkNDJhMDkyNmE3ZmVjZmRiMWY2ZTBjMDBhNzJjMjhkZWYwY2M4NjAwNDAxMTU0ZmI5YjE4NWZhNTVhNWM3IiwidGFnIjoiIn0="
    }
    url = f"https://robograding.com/api/v3/card-price/{ucid}"
    response = requests.get(url, headers=headers, proxies=get_proxys(), timeout=10)
    # response = requests.get(url, headers=headers)
    if response.status_code != 200:
        logger.debug('Request failed, retrying......................')
        raise Exception('Request failed, retrying......................')
    price = response.json().get('price')
    return price


def transform_date(date_str):
    """
    Convert a date string like "November 13, 2020" into YYYY-MM-DD format.
    :param date_str: date string
    :return: formatted_date
    """
    # Parse the date string
    date_obj = datetime.strptime(date_str, "%B %d, %Y")
    # Format the date
    formatted_date = date_obj.strftime("%Y-%m-%d")
    return formatted_date
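
# Example: transform_date("November 13, 2020") returns "2020-11-13".
# Note: "%B" matches English month names, so this assumes an English (or C) locale.

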
def save_data(sql_pool, info):
    sql = "INSERT INTO ags_record (cert_id, name, title, score, card_type, release_date, series, card, ags_set, owner, centering_overall, surface_overall, edges_overall, corners_overall, price, front_img, back_img) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    sql_pool.insert_one(sql, info)
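
# For reference only: the ags_record table is assumed to look roughly like the
# commented sketch below, inferred from the INSERT column list above. Column
# types and lengths are guesses, not the actual schema.
#
# CREATE TABLE ags_record (
#     cert_id VARCHAR(16),
#     name VARCHAR(255),
#     title VARCHAR(255),
#     score VARCHAR(16),
#     card_type VARCHAR(64),
#     release_date DATE,
#     series VARCHAR(255),
#     card VARCHAR(255),
#     ags_set VARCHAR(255),
#     owner VARCHAR(255),
#     centering_overall VARCHAR(16),
#     surface_overall VARCHAR(16),
#     edges_overall VARCHAR(16),
#     corners_overall VARCHAR(16),
#     price VARCHAR(32),
#     front_img VARCHAR(512),
#     back_img VARCHAR(512)
# );

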
def parse_data(cert_id, resp_text, sql_pool):
    selector = Selector(text=resp_text)
    name = selector.xpath('//div[@class="feed-view__header__content"]//h1/text()').get()
    title = selector.xpath('//div[@class="feed-view__header__content"]//h2/text()').get()
    score = selector.xpath(
        '//div[@class="feed-view__header__content"]//p[@class="feed-view__header__grade-score"]/text()').get()
    tr_list = selector.xpath('//div[@class="feed-view__right-side"]//tbody/tr[not(contains(@class, "feed-view"))]')
    result_dict = {'card_type': '',
                   'release_date': '',
                   'series': '',
                   'card': '',
                   'set': '',
                   'owner': ''}
    for tr in tr_list:
        item_key = tr.xpath('./td[1]/h3/text()').get()
        item_val = tr.xpath('./td[2]/text()').get()
        try:
            item_key = item_key.strip().replace(':', '')
            item_val = item_val.strip().replace(':', '')
            # print('item_key,item_val', item_key, item_val)
            key_snake_case = item_key.replace(' ', '_').replace('/', '_').lower()
            if key_snake_case in result_dict:
                # print(key_snake_case, item_val)
                result_dict[key_snake_case] = item_val
        except Exception as e:
            logger.debug(e)
    # Overall sub-grade scores
    centering_overall = selector.xpath(
        '//div[@class="feed-view__breakdown__scores feed-view__breakdown__scores--contained"]/div[1]/p[2]/text()').get()
    surface_overall = selector.xpath(
        '//div[@class="feed-view__breakdown__scores feed-view__breakdown__scores--contained"]/div[2]/p[2]/text()').get()
    edges_overall = selector.xpath(
        '//div[@class="feed-view__breakdown__scores feed-view__breakdown__scores--contained"]/div[3]/p[2]/text()').get()
    corners_overall = selector.xpath(
        '//div[@class="feed-view__breakdown__scores feed-view__breakdown__scores--contained"]/div[4]/p[2]/text()').get()
    # Get the user card id
    user_card_id = selector.xpath(
        '//div[@class="feed-view__breakdown__scores-holder feed-view__breakdown__scores-card-price"]/div/@data-user-card-id').get()
    if user_card_id:
        price = get_price(user_card_id)
    else:
        price = None
    img_json = selector.xpath(
        '//section[@class="feed-view__content"]//div[@class="feed-view__card"]/div/@data-images').get()
    img_dict = json.loads(img_json)
    front_img = img_dict.get('front_slab_image')
    back_img = img_dict.get('back_slab_image')
    if not front_img:
        front_img = img_dict.get('image_path')
        back_img = None
    release_date = transform_date(result_dict.get('release_date')) if result_dict.get('release_date') else None
    info = (cert_id, name, title, score, result_dict.get('card_type'), release_date,
            result_dict.get('series'), result_dict.get('card'), result_dict.get('set'), result_dict.get('owner'),
            centering_overall, surface_overall, edges_overall, corners_overall, price, front_img, back_img)
    # print(info)
    save_data(sql_pool, info)


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def get_data(ags_id_list, sql_pool):
    # ags_id_list is a single (id, cert_id) row from the ags_task table
    sql_id = ags_id_list[0]
    cert_id = ags_id_list[1]
    logger.debug(f"Start processing {cert_id}")
    headers = {
        "user-agent": user_agent.generate_user_agent()
    }
    url = f"https://robograding.com/feed/{cert_id}/view"
    try:
        response = requests.get(url, headers=headers, proxies=get_proxys(), timeout=10)
        # response = requests.get(url, headers=headers)
        response.raise_for_status()
        if "Grades are not available yet" in response.text:
            logger.debug("Grades are not available yet in response.text......................")
            # Update the task state in the database to "not finished"
            sql_pool.update_one("UPDATE ags_task SET state=2 WHERE id=%s", (sql_id,))
        else:
            parse_data(cert_id, response.text, sql_pool)
            # Update the task state in the database to "finished"
            sql_pool.update_one("UPDATE ags_task SET state=1 WHERE id=%s", (sql_id,))
    except requests.RequestException as e:
        logger.error(f"Request error: {e}")
        raise  # re-raise so it can be handled externally (and retried by the decorator)


def process_urls(ids, mysql_pool, batch_size=1000, max_workers=5):
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        for i in range(0, len(ids), batch_size):
            batch = ids[i:i + batch_size]
            try:
                futures_to_urls = {executor.submit(get_data, url, mysql_pool): url for url in batch}
                for future in concurrent.futures.as_completed(futures_to_urls):
                    url = futures_to_urls[future]
                    try:
                        future.result()
                        logger.debug(f"Processed {url} successfully")
                    except Exception as exc:
                        logger.debug(f"Error processing {url}: {exc}")
            except Exception as e:
                logger.error(f"Failed to submit tasks: {e}")
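
# Usage sketch (hypothetical values): each element of `ids` is an (id, cert_id)
# row as returned by get_new_task, e.g.
#   process_urls([(1, "00205223"), (2, "00205224")], sql_pool, batch_size=500, max_workers=3)
# The outer loop submits at most `batch_size` tasks to the thread pool per iteration.

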
# def get_add_cert(start_, end_):
#     num_list = []
#     # Generate 8-digit strings in a loop
#     for num in range(start_, end_ + 1):
#         # Use zfill to convert the number to a string, left-padded with zeros to 8 digits
#         formatted_num = str(num).zfill(8)
#         # print(formatted_num)
#         num_list.append(formatted_num)
#     return num_list


def get_new_task(sql_pool):
    # Fetch the latest cert_id and add 2000 to it
    max_cert = sql_pool.select_one("SELECT MAX(cert_id) FROM ags_record LIMIT 1")
    end_max_cert_num = int(max_cert[0]) + 2000
    end_max_cert_str = f"{end_max_cert_num:08}"  # format as 8 digits, left-padded with zeros
    # logger.debug(f'Latest id found: {end_max_cert_str[0]}, generating new data and adding it to the task table.........')
    # Query the list of new tasks
    """
    Daily update task: +2000, -1000.
    """
    ags_id_list = sql_pool.select_all(
        f"SELECT id, cert_id FROM ags_task WHERE state != 1 AND cert_id <= '{end_max_cert_str}' ORDER BY id DESC LIMIT 3000")
    # ags_id_list = sql_pool.select_all("SELECT id,cert_id FROM ags_task WHERE id < 927059 AND state = 0 LIMIT 10000")
    ags_id_list = list(ags_id_list)
    return ags_id_list


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def main():
    """
    Main crawler entry point
    """
    try:
        logger.info("Starting the tag_spider crawler task............................................................")
        sql_pool = MySQLConnectionPool(log=logger)
        if not sql_pool:
            logger.error("Database connection failed")
            raise Exception("Database connection failed")
        # while True:
        new_task = get_new_task(sql_pool)
        if not new_task:
            logger.debug(".............................. No new tasks, ending this round ..............................")
            # break
            return
        try:
            process_urls(new_task, sql_pool, batch_size=1000, max_workers=5)
        except Exception as e:
            logger.error(f'process urls: {e}')
    except Exception as e:
        logger.error(f'error: {e}')
    finally:
        logger.info("Crawler run finished, waiting for the next round of collection.............")


def schedule_task():
    main()
    schedule.every().day.at("00:01").do(main)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    schedule_task()
    # main()
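
# ----------------------------------------------------------------------------
# Reference only: `mysql_pool.MySQLConnectionPool` is an external module that is
# not included in this file. The commented sketch below is inferred from how it
# is used above (constructor taking `log=`, plus insert_one / update_one /
# select_one / select_all); the real implementation, connection settings, and
# return types may differ.
#
# import pymysql
# from dbutils.pooled_db import PooledDB
#
# class MySQLConnectionPool:
#     def __init__(self, log, host="127.0.0.1", port=3306, user="root",
#                  password="", database="ags"):  # connection params are placeholders
#         self.log = log
#         self.pool = PooledDB(creator=pymysql, maxconnections=10, host=host,
#                              port=port, user=user, password=password,
#                              database=database, charset="utf8mb4")
#
#     def _execute(self, sql, args=None, fetch=None):
#         conn = self.pool.connection()
#         try:
#             with conn.cursor() as cursor:
#                 cursor.execute(sql, args)
#                 if fetch == "one":
#                     return cursor.fetchone()
#                 if fetch == "all":
#                     return cursor.fetchall()
#                 conn.commit()
#         finally:
#             conn.close()
#
#     def insert_one(self, sql, args):
#         return self._execute(sql, args)
#
#     def update_one(self, sql, args):
#         return self._execute(sql, args)
#
#     def select_one(self, sql, args=None):
#         return self._execute(sql, args, fetch="one")
#
#     def select_all(self, sql, args=None):
#         return self._execute(sql, args, fetch="all")
# ----------------------------------------------------------------------------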