# -*- coding: utf-8 -*-
# Author : Charley
# Python : 3.8.10
# Date   : 2025/4/2 18:57
import inspect
import random
import time
from datetime import datetime

import schedule
from loguru import logger
# from tls_client import Session
from curl_cffi import Session
from tenacity import retry, stop_after_attempt, wait_fixed

from mysql_pool import MySQLConnectionPool
USER_NAME_LIST = ['fanatics', 'hobbysbestcards']  # List of usernames to scrape
# cookie = r'ig_did=8D2CD910-0CBD-41CD-A5B4-9EB7E2F8BC91; ps_l=1; ps_n=1; datr=0aYZaGecXnDrIALr4HPo5O0h; mid=aBmm0QALAAFOBiNIagQ4prL9V4Zg; dpr=1.5; csrftoken=1Eeolr1d8t3VMjwNQIeMMQx9JTlyUsGu; sessionid=50762414324%3Af7LRzwBjb06Q7U%3A6%3AAYfUreTnqm7V_o3Pvqt0Tej1vwMQDGjOKw_Zm8TOqA; ds_user_id=50762414324; rur="RVA\05450762414324\0541778817145:01f75b26510d73b461bb75b0f907b2ec268507a83f95fb7c5a8571ced3b614c68af0d6b5"; wd=1707x247'
# Regex for pulling app_id / claim / csrf_token / LSD token out of Instagram page source (unused below);
# the last alternative escapes the literal brackets and brace so the pattern compiles as intended
PARAMS = r'("app_id":\s*"[^"]+")|("claim":\s*"[^"]+")|("csrf_token":\s*"[^"]+")|(\["LSD",\[\],\{"token":\s*"[^"]+")'
# session = Session(client_identifier="chrome_120", random_tls_extension_order=True)
session = Session()
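# Note: curl_cffi can also impersonate a real browser's TLS fingerprint, which
# Instagram is more likely to accept than curl's default one. An optional
# alternative, not part of the original flow:
#     session = Session(impersonate="chrome120")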
logger.remove()
logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
           level="DEBUG", retention="7 days")
MAX_PAGE = 3  # Maximum number of pages to fetch per user


def after_log(retry_state):
    """
    Retry callback for tenacity.
    :param retry_state: RetryCallState object
    """
    # Use the logger passed as the first positional argument, if any
    if retry_state.args and len(retry_state.args) > 0:
        log = retry_state.args[0]
    else:
        log = logger  # fall back to the global logger
    if retry_state.outcome.failed:
        log.warning(
            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
    else:
        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")


@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
def get_proxys(log):
    """
    Build the proxy configuration.
    :return: proxies dict
    """
    tunnel = "x371.kdltps.com:15818"
    kdl_username = "t13753103189895"
    kdl_password = "o0yefv6z"
    try:
        proxies = {
            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
        }
        return proxies
    except Exception as e:
        log.error(f"Error getting proxy: {e}")
        raise e
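
# Note: get_proxys() is defined but never wired into ajax_request() below. A
# minimal sketch of routing one request through the tunnel (the httpbin.org
# URL is only an illustrative assumption):
#
#     proxies = get_proxys(logger)
#     resp = session.get('https://httpbin.org/ip', proxies=proxies, timeout=10)
#     print(resp.text)  # should report the proxy's egress IP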


@retry(stop=stop_after_attempt(5), wait=wait_fixed(15), after=after_log)
def ajax_request(log, url: str, cookies, params=None):
    """
    Request wrapper.
    :param log: logger object
    :param url: api url
    :param cookies: cookies
    :param params: api params
    :return: json object
    """
    try:
        headers = {
            'sec-fetch-mode': 'cors',
            'referer': 'https://www.instagram.com/',
            'x-ig-app-id': '936619743392459',
            'sec-fetch-site': 'same-site',
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'x-asbd-id': '198387',
            'accept': '*/*',
            'sec-ch-ua': '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"',
            'sec-ch-ua-mobile': '?0',
            'x-ig-www-claim': 'hmac.AR11qy__GsvLpiS4wKBygLGdxs2DxJB1esTkBw7b2QFaHH2d',
            'authority': 'i.instagram.com',
            'sec-ch-ua-platform': '"Windows"',
            'x-instagram-ajax': '1006400593',
            'sec-fetch-dest': 'empty',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.36',
            'cookie': cookies  # pass the raw cookie string through, without extra quoting
        }
        resp = session.get(url, headers=headers, params=params)
        resp.raise_for_status()
        return resp.json()
    except Exception as e:
        log.error(f"Request failed: {e}")
        raise
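
# Example call (the cookie string below is an assumption; a real session
# cookie from the instagram_cookies table is required):
#
#     data = ajax_request(logger,
#                         'https://i.instagram.com/api/v1/feed/user/fanatics/username/',
#                         cookies='sessionid=...; csrftoken=...',
#                         params={'count': '12'})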


def get_userPosts(log, userName: str, sql_uid_list: list, ins_cookies):
    """
    Fetch all posts for a username.
    :param log: logger object
    :param userName: username
    :param sql_uid_list: list of uids already stored in MySQL
    :param ins_cookies: Instagram cookies
    :return: generator
    """
    page = 1
    continuations = [{
        'count': '12',
    }]
    temp = userName + '/username/'
    while continuations:
        continuation = continuations.pop()
        log.info(f"The page number currently requested is: {page}.........")
        # The URL uses the username on the first request and the user's pk afterwards
        url = 'https://i.instagram.com/api/v1/feed/user' + f'/{temp}'
        resp = ajax_request(log, url, ins_cookies, params=continuation)
        if not resp:
            log.error("API request failed, skipping this page")
            continue
        time.sleep(random.uniform(5, 8))
        page += 1
        if page > MAX_PAGE:
            log.info(f"Reached the maximum page count ({MAX_PAGE}), stopping.........")
            break
        # No such user (or the cookie is invalid)
        if not resp.get('user'):
            log.warning(f"checking cookie or unknown/private User: {userName}")
            return
        _items = resp.get('items', [])
        # Queue the next page when more posts are available
        if resp.get('more_available'):
            continuations.append({'count': '12', 'max_id': resp.get('next_max_id')})
        user = resp.get('user')
        temp = user.get('pk_id') if user.get('pk_id') else user.get('pk')
        yield from extract_post(log, _items, userName, sql_uid_list)


def extract_post(log, posts, user_name: str, sql_uid_list: list):
    """
    Extract post dicts from a list of raw posts.
    :param log: logger object
    :param posts: original instagram posts
    :param user_name: user_name
    :param sql_uid_list: list of uids already stored in MySQL
    :return: generator of post dicts
    """
    if not posts:  # handle None or an empty list
        log.debug("No posts found.")
        return
    for post in posts:
        caption = post.get('caption')
        created_at_stamp = caption.get('created_at') if caption else post.get('taken_at')
        created_at = datetime.fromtimestamp(created_at_stamp).strftime("%Y-%m-%d %H:%M:%S")
        uid = post.get('code')
        if uid in sql_uid_list:
            log.info(f"uid:{uid} has been processed, skipping................")
            continue
        item = {
            'user_name': user_name,
            'uid': uid,
            'pid': post.get('pk'),
            'pk_id': post.get('id'),
            'comment_count': post.get('comment_count'),
            'like_count': post.get('like_count'),
            'title': caption.get('text') if caption else None,
            'created_at': created_at
        }
        # Other media types can be added here
        types = post.get('media_type')
        if types == 8:  # carousel: collect one image URL per carousel item
            try:
                imgs_list = [media.get('image_versions2', {}).get('candidates', [{}])[0].get('url')
                             for media in post.get('carousel_media', [])]
            except Exception as e:
                log.warning(f"imgs_list processing post: {e}")
                imgs_list = [post.get('image_versions2', {}).get('candidates', [{}])[0].get('url')]
            imgs = ','.join(imgs_list) if imgs_list else None
            item.update({
                'imgs_url': imgs,
                'video_url': post.get('carousel_media', [{}])[0].get('video_versions', [{}])[0].get('url')
            })
        elif types == 2:  # video
            item.update({
                'imgs_url': None,
                'video_url': post.get('video_versions', [{}])[0].get('url')
            })
        elif types == 1:  # single image
            item.update({
                'imgs_url': post.get('image_versions2', {}).get('candidates', [{}])[0].get('url'),
                'video_url': None
            })
        yield item
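
# Shape of a yielded item (field values are illustrative only):
#
#     {'user_name': 'fanatics', 'uid': 'DH9...', 'pid': 3599..., 'pk_id': '3599..._5076...',
#      'comment_count': 12, 'like_count': 3456, 'title': 'caption text',
#      'created_at': '2025-04-02 18:57:00',
#      'imgs_url': 'https://...,https://...', 'video_url': None}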


@retry(stop=stop_after_attempt(50), wait=wait_fixed(1800), after=after_log)
def ins_posts_main(log):
    """
    Main entry point. On failure, tenacity retries every 1800 s (30 min), up to 50 times.
    :param log: logger object
    """
    log.info(
        f'Starting crawler task {inspect.currentframe().f_code.co_name}....................................................')
    # Configure the MySQL connection pool
    sql_pool = MySQLConnectionPool(log=log)
    if not sql_pool.check_pool_health():
        log.error("MySQL connection pool is unhealthy")
        raise RuntimeError("MySQL connection pool is unhealthy")
    try:
        ins_cookies = sql_pool.select_one('select cookies from instagram_cookies')
        ins_cookies = ins_cookies[0] if ins_cookies else None
        for user_name in USER_NAME_LIST:
            log.info(
                f'-------------------------------- Start crawling all posts of user {user_name} --------------------------------')
            sql_uid_list = sql_pool.select_all('select uid from instagram_posts_record where user_name = %s',
                                               (user_name,))
            sql_uid_list = [_[0] for _ in sql_uid_list]
            log.debug(f'Length of the stored uid list sql_uid_list: {len(sql_uid_list)}')
            items_ = get_userPosts(log, user_name, sql_uid_list, ins_cookies)
            for item_ in items_:
                sql_pool.insert_one_or_dict('instagram_posts_record', item_)
            sql_uid_list.clear()
    except Exception as e:
        log.error(f'{inspect.currentframe().f_code.co_name} error: {e}')
    finally:
        log.info(f'Crawler {inspect.currentframe().f_code.co_name} finished; waiting for the next run............')


def schedule_task():
    """
    Set up the scheduled task: run once at startup, then every Monday at 06:00.
    """
    ins_posts_main(log=logger)
    # schedule.every().day.at("05:00").do(ins_posts_main, log=logger)
    schedule.every().monday.at("06:00").do(ins_posts_main, log=logger)
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    schedule_task()