ins_posts_spider.py

# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.8.10
# Date : 2025/4/2 18:57
import inspect
import random
import time

import schedule
from loguru import logger
from datetime import datetime
# from tls_client import Session
from curl_cffi import Session
from tenacity import retry, stop_after_attempt, wait_fixed

from mysql_pool import MySQLConnectionPool

USER_NAME_LIST = ['fanatics', 'hobbysbestcards']  # list of usernames to crawl
# cookie = r'ig_did=8D2CD910-0CBD-41CD-A5B4-9EB7E2F8BC91; ps_l=1; ps_n=1; datr=0aYZaGecXnDrIALr4HPo5O0h; mid=aBmm0QALAAFOBiNIagQ4prL9V4Zg; dpr=1.5; csrftoken=1Eeolr1d8t3VMjwNQIeMMQx9JTlyUsGu; sessionid=50762414324%3Af7LRzwBjb06Q7U%3A6%3AAYfUreTnqm7V_o3Pvqt0Tej1vwMQDGjOKw_Zm8TOqA; ds_user_id=50762414324; rur="RVA\05450762414324\0541778817145:01f75b26510d73b461bb75b0f907b2ec268507a83f95fb7c5a8571ced3b614c68af0d6b5"; wd=1707x247'
# Regex for pulling auth tokens out of the page HTML (currently unused)
PARAMS = r'("app_id":\s*"[^"]+")|("claim":\s*"[^"]+")|("csrf_token":\s*"[^"]+")|(["LSD",[],{"token":\s*"[^"]+")'

# session = Session(client_identifier="chrome_120", random_tls_extension_order=True)
session = Session()

logger.remove()
logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
           level="DEBUG", retention="7 days")

MAX_PAGE = 3  # maximum number of pages to request per user


def after_log(retry_state):
    """
    Retry callback invoked by tenacity after each attempt.
    :param retry_state: RetryCallState object
    """
    # Check whether positional args exist and are non-empty
    if retry_state.args and len(retry_state.args) > 0:
        log = retry_state.args[0]  # use the logger passed to the decorated function
    else:
        log = logger  # fall back to the global logger
    if retry_state.outcome.failed:
        log.warning(
            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
    else:
        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_proxys(log):
    """
    Build the proxy configuration (Kuaidaili tunnel proxy).
    :return: proxies dict
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise e
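
# NOTE: get_proxys() is defined but never called in this script. If traffic
# should go through the tunnel proxy, a minimal sketch (assuming curl_cffi's
# requests-compatible `proxies=` parameter) would be to pass the dict into
# session.get() inside ajax_request():
#
#     proxies = get_proxys(log)
#     resp = session.get(url, headers=headers, params=params, proxies=proxies)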


@retry(stop=stop_after_attempt(5), wait=wait_fixed(15), after=after_log)
def ajax_request(log, url: str, cookies, params=None):
    """
    Request wrapper.
    :param log: logger object
    :param url: api url
    :param cookies: cookies
    :param params: api params
    :return: json object
    """
    try:
        headers = {
            'sec-fetch-mode': 'cors',
            'referer': 'https://www.instagram.com/',
            'x-ig-app-id': '936619743392459',
            'sec-fetch-site': 'same-site',
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'x-asbd-id': '198387',
            'accept': '*/*',
            'sec-ch-ua': '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"',
            'sec-ch-ua-mobile': '?0',
            'x-ig-www-claim': 'hmac.AR11qy__GsvLpiS4wKBygLGdxs2DxJB1esTkBw7b2QFaHH2d',
            'authority': 'i.instagram.com',
            'sec-ch-ua-platform': '"Windows"',
            'x-instagram-ajax': '1006400593',
            'sec-fetch-dest': 'empty',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.36',
            'cookie': cookies  # pass the raw cookie string through unchanged
        }
        resp = session.get(url, headers=headers, params=params)
        # print(resp.text)
        resp.raise_for_status()
        return resp.json()
    except Exception as e:
        log.error(f"Request failed: {e}")
        raise
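
# NOTE: with tenacity's defaults, ajax_request raises RetryError once all five
# attempts fail rather than returning None, so the `if not resp` check in
# get_userPosts below is only a defensive guard. A sketch that would make it
# return None on exhaustion instead (an assumption, not the original behavior):
#
#     @retry(stop=stop_after_attempt(5), wait=wait_fixed(15), after=after_log,
#            retry_error_callback=lambda retry_state: None)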


def get_userPosts(log, userName: str, sql_uid_list: list, ins_cookies):
    """
    Fetch all posts of a user by username.
    :param log: logger object
    :param userName: username
    :param sql_uid_list: list of uids already stored in the database
    :param ins_cookies: ins_cookies
    :return: generator
    """
    page = 1
    continuations = [{
        'count': '12',
    }]
    temp = userName + '/username/'
    while continuations:
        continuation = continuations.pop()
        log.info(f"The page number currently requested is: {page}.........")
        # The URL changes on the second request (the username path segment
        # is replaced by the user's numeric pk, see below)
        url = 'https://i.instagram.com/api/v1/feed/user' + f'/{temp}'
        resp = ajax_request(log, url, ins_cookies, params=continuation)
        if not resp:
            log.error("API request failed, skipping the current page")
            continue
        time.sleep(random.uniform(5, 8))
        page += 1
        if page > MAX_PAGE:
            log.info(f"Reached the maximum page count at page {page}, stopping.........")
            break
        # No such user
        if not resp.get('user'):
            log.warning(f"checking cookie or unknown/private User: {userName}")
            yield 'checking cookie or unknown/private User: {}'.format(userName)
        else:
            _items = resp.get('items', [])
            # More pages available: queue the next continuation (the API's
            # equivalent of scrolling down to load more posts)
            if resp.get('more_available'):
                continuations.append({'count': '12', 'max_id': resp.get('next_max_id')})
            user = resp.get('user')
            temp = user.get('pk_id') if user.get('pk_id') else user.get('pk')
            yield from extract_post(log, _items, userName, sql_uid_list)
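
# For reference, the pagination protocol above works like this (values
# illustrative): the first request sends {'count': '12'} to
# .../feed/user/<userName>/username/, the response carries `next_max_id`, and
# each later request sends {'count': '12', 'max_id': '<next_max_id>'} to
# .../feed/user/<pk> until `more_available` is false.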


def extract_post(log, posts, user_name: str, sql_uid_list: list):
    """
    Extract structured items from a list of raw Instagram posts.
    :param log: logger object
    :param posts: original instagram posts
    :param user_name: user_name
    :param sql_uid_list: list of uids already stored in the database
    :return: generator of post dicts
    """
    # print("extract_post")
    if not posts:  # handle None or an empty list
        log.debug("No posts found.")
        return
    for post in posts:
        # print('post:', post)
        caption = post.get('caption')
        created_at_stamp = caption.get('created_at') if caption else post.get('taken_at')
        created_at = datetime.fromtimestamp(created_at_stamp).strftime("%Y-%m-%d %H:%M:%S")
        uid = post.get('code')
        if uid in sql_uid_list:
            log.info(f"uid:{uid} has been processed, skipping................")
            continue
        item = {
            'user_name': user_name,
            'uid': uid,
            'pid': post.get('pk'),
            'pk_id': post.get('id'),
            'comment_count': post.get('comment_count'),
            'like_count': post.get('like_count'),
            'title': caption.get('text') if caption else None,
            'created_at': created_at
        }
        # Other media types can be added here
        types = post.get('media_type')
        if types == 8:  # carousel: multiple photos/videos
            try:
                # take each carousel entry's own image URL (the original read the
                # top-level image once per entry, duplicating the same URL)
                imgs_list = [media.get('image_versions2', {}).get('candidates', [{}])[0].get('url')
                             for media in post.get('carousel_media')]
            except Exception as e:
                log.warning(f"imgs_list processing post: {e}")
                # imgs_list = [_.get('url') for _ in post.get('image_versions2', {}).get('candidates', [{}])[0]]
                imgs_list = [post.get('image_versions2', {}).get('candidates', [{}])[0].get('url')]
            imgs = ','.join(imgs_list) if imgs_list else None
            item.update({
                'imgs_url': imgs,
                'video_url': post.get('carousel_media', [{}])[0].get('video_versions', [{}])[0].get('url')
            })
        elif types == 2:  # single video
            item.update({
                'imgs_url': None,
                'video_url': post.get('video_versions', [{}])[0].get('url')
            })
        elif types == 1:  # single photo
            item.update({
                'imgs_url': post.get('image_versions2', {}).get('candidates', [{}])[0].get('url'),
                'video_url': None
            })
        yield item
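
# For reference, a yielded item has this shape (all field values below are
# illustrative, not real data):
#
#     {'user_name': 'fanatics', 'uid': 'DEmo1AbCdEf', 'pid': 3551234567890123456,
#      'pk_id': '3551234567890123456_1234567890', 'comment_count': 12,
#      'like_count': 345, 'title': 'caption text', 'created_at': '2025-04-02 18:57:00',
#      'imgs_url': 'https://...jpg,https://...jpg', 'video_url': None}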


@retry(stop=stop_after_attempt(50), wait=wait_fixed(1800), after=after_log)
def ins_posts_main(log):
    """
    Main function.
    :param log: logger object
    """
    log.info(
        f'Starting crawler task {inspect.currentframe().f_code.co_name}....................................................')
    # Configure the MySQL connection pool
    sql_pool = MySQLConnectionPool(log=log)
    if not sql_pool.check_pool_health():
        log.error("Database connection pool is unhealthy")
        raise RuntimeError("Database connection pool is unhealthy")
    try:
        ins_cookies = sql_pool.select_one('select cookies from instagram_cookies')
        ins_cookies = ins_cookies[0] if ins_cookies else None
        for user_name in USER_NAME_LIST:
            log.info(
                f'-------------------------------- Start crawling all posts of user {user_name} --------------------------------')
            sql_uid_list = sql_pool.select_all('select uid from instagram_posts_record where user_name = %s',
                                               (user_name,))
            sql_uid_list = [_[0] for _ in sql_uid_list]
            log.debug(f'Length of the queried uid list sql_uid_list: {len(sql_uid_list)}')
            items_ = get_userPosts(log, user_name, sql_uid_list, ins_cookies)
            for item_ in items_:
                # print(item_)
                sql_pool.insert_one_or_dict('instagram_posts_record', item_)
            sql_uid_list.clear()
    except Exception as e:
        log.error(f'{inspect.currentframe().f_code.co_name} error: {e}')
    finally:
        log.info(f'Crawler {inspect.currentframe().f_code.co_name} finished, waiting for the next round of collection............')
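
# NOTE: the @retry on ins_posts_main (50 attempts, 30 minutes apart) only fires
# when the function raises, but the try/except above swallows every exception
# after the pool health check; re-raising inside the except block would be
# needed for that retry loop to actually take effect.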


def schedule_task():
    """
    Set up the scheduled task: run once immediately, then every Monday at 06:00.
    """
    ins_posts_main(log=logger)
    # schedule.every().day.at("05:00").do(ins_posts_main, log=logger)
    schedule.every().monday.at("06:00").do(ins_posts_main, log=logger)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    schedule_task()