# -*- coding: utf-8 -*- # Author : Charley # Python : 3.10.8 # Date : 2025/6/16 13:48 import random import time import inspect import requests import schedule from loguru import logger from mysql_pool import MySQLConnectionPool from tenacity import retry, stop_after_attempt, wait_fixed logger.remove() logger.add("./logs/{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00", format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}", level="DEBUG", retention="7 day") def after_log(retry_state): """ retry 回调 :param retry_state: RetryCallState 对象 """ # 检查 args 是否存在且不为空 if retry_state.args and len(retry_state.args) > 0: log = retry_state.args[0] # 获取传入的 logger else: log = logger # 使用全局 logger if retry_state.outcome.failed: log.warning( f"Function '{retry_state.fn.__name__}', Attempt {retry_state.attempt_number} Times") else: log.info(f"Function '{retry_state.fn.__name__}', Attempt {retry_state.attempt_number} succeeded") @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log) def get_proxys(log): """ 获取代理 :return: 代理 """ tunnel = "x371.kdltps.com:15818" kdl_username = "t13753103189895" kdl_password = "o0yefv6z" try: proxies = { "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}, "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel} } return proxies except Exception as e: log.error(f"Error getting proxy: {e}") raise e @retry(stop=stop_after_attempt(10), wait=wait_fixed(2), after=after_log) def get_resp_one_page(log, page): log.debug(f"Getting page {page}..............................") headers = { "accept": "application/json, text/plain, */*", "accept-language": "en,zh-CN;q=0.9,zh;q=0.8", "bx-v": "2.5.31", "priority": "u=1, i", "referer": "https://www.lazada.com.my/", "sec-ch-ua": "\"Google Chrome\";v=\"137\", \"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"", "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": "\"Windows\"", "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36", # "x-csrf-token": "e4b1ee587f333" } cookies = { "__wpkreporterwid_": "9a807029-4faf-408f-8741-5bd7a5c9bb49", "lazada_share_info": "1943013429_100_100_0_1943015429_null", "t_fv": "1750044788744", "t_uid": "IGNxirTJDPLVkegWMu0OjthsufRjWNgo", "cna": "dn7WIL8YbU0CAd+mqILgq5w2", "hng": "MY|en-MY|MYR|458", "hng.sig": "cLZ14_ZixioeDzk3FYgOHWtqfULyySre3d96Ouq-H6k", "EGG_SESS": "S_Gs1wHo9OvRHCMp98md7JBvEEnBpIGRATFScrAN0TSKROM9JS5ucnMrutmhJ6e0wTv_5ShoQDavnBd8GRhoufb8Sr_sUum68vg6woIDhiBw82Q8q_zFDAesYtMyDMb_pOyNXueCNJYEWgZDx1h2vQdEJsqeKN4AWpCQgGp-y5g=", "lwrid": "AgGXdtASjDNi5VqqCd6sX39uIxoL", "lzd_cid": "ab175d2a-6612-4a32-b759-475063ce8e55", "lzd_sid": "1e76f843e0504fb8ee3940a1b599e03d", "_tb_token_": "e4b1ee587f333", "xlly_s": "1", "lwrtk": "AAIEaFAB7osFX5Jf9a73Sg0oQN02mKk6aQXx1EKkYzrYCsXMq5K8D/I=", "_bl_uid": "g3m1Xbthy3wj3Uo1tqv3bhjhjdp9", "_gcl_au": "1.1.208579588.1750045133", "_ga": "GA1.3.1640854487.1750045147", "_gid": "GA1.3.661757065.1750045147", "AMCVS_126E248D54200F960A4C98C6%40AdobeOrg": "1", "AMCV_126E248D54200F960A4C98C6%40AdobeOrg": "-1124106680%7CMCIDTS%7C20256%7CMCMID%7C67086440579640317143819662540753251056%7CMCAAMLH-1750649953%7C11%7CMCAAMB-1750649953%7CRKhpRz8krg2tLO6pguXWp5olkAcUniQYPHaMWWgdJ3xzPWQmdj0y%7CMCOPTOUT-1750052353s%7CNONE%7CvVersion%7C5.2.0", "_m_h5_tk": "a76b9e5019c389f94d6cdde2ad6ed1fd_1750060971812", "_m_h5_tk_enc": "3cf92060e0c57a1f6a970599d58b33e5", "t_sid": "Wc3j5Z64n0pv3RftLZRHnRvF9SWuMFH0", "utm_channel": "NA", "_uetsid": "7514f0104a6311f0be7d6b89a4c1e673", "_uetvid": "75150b504a6311f0974d59c1d64da194", "isg": "BGRk0ZPK0JhEdSRUv65oxzGYNWJW_Yhn-5NlwX6BsC9nKQfzpgyY9rub64kxysC_", "_ga_6VT623WX3F": "GS2.3.s1750056508$o2$g1$t1750056548$j20$l0$h0", "cto_bundle": "6xFME19zVHhMJTJGSGFlQ2lqZXRVNEZuZFhzd2VZMkNCRU1rTHRSeEJyYXQ5UUglMkYyTHRUQkFWeiUyQlV1OEI0aXV2JTJGa0FUNHQ1UlBVVTdBWnJxbzFuTGFaUDY3SEZ0TjRqdmElMkZxR05yT2dNOXNjUWNRTFVnOVZ3dmVqenpyZzZ4WUUxRmlYcDZxSXgyN2NWJTJGNnlERm1JUHlYZjBKbVRjZnNSMXRUTUxDMXIlMkZ6NjlMJTJCczhJJTNE", "epssw": "9*mmCGJmjkWHDn9AvOutXURpqiQILO7tvzut2mZuTmu1HBO9JHdSZ3huBwdSa4dImm3tDm6umZPzy3AHysIiHmugXr0_mwvDom4MTI51KKu7zxCst0hjhSKSv92v2S2MqVWY3I23XQR26pVAufeJtQkKSyIfznmemTbDmm9LmOQiG3oIImuVuuaKmqtZKcKuWzBqSbyPfDarldaYPMun0GyDdPGXM_Zj5HpwuuxA0adYARJsm4miLR3AeYwqCVfjpFmmTMUAz71BGX80R14ITmrOPUGszCOak1FsVdYcXI6WY9HYJw3W3aURkH3fEXu3gbPS0PEmvPooIHqriXMYcCS_HAbI..", "tfstk": "fxSraJ4cruEPPNVS5Q-EgbZ5SG-JqhF6KMOBK9XHFQAoV0GhusCEd8FLyZRFis8HP4tlK6WpgUwJRBsVT151Fg_7yJSVKOAWd8KoeZC1I0s7yHie2HK315Z_YT6JvHxbgP3owEvwpkikE0Dz7sn415Z_czDDY7P1O3HMJ49piLmkxp24mdJ9xLvkx-RDLd3nZ6xHPxSm_5MWyoBwiNdHdyegZxRxxDju2UAZAIpv3irWyC6ldDiQpL8ysaA0Zf3V0EXp8MHKYhWlW6pGZbVyxtJN_d5zRvdPoOs5-Tr4s3sRENYhjW3h_35eSgYqKDWJYQx2nG2IWIs2N6jyoRiwRnj6S3bbk7TB4d5h2_kEx1jPcXp0O2sdzX0erKp21-y2Ah5F7W_O3o0KJEYM3CwJ-23prKp21-yqJ2L-jKR_eef.." } url = "https://www.lazada.com.my/pop-mart-official-store/" params = { "ajax": "true", "from": "wangpu", "page": str(page), "q": "All-Products" } response = requests.get(url, headers=headers, cookies=cookies, params=params, proxies=get_proxys(log), timeout=10) # print(response.json()) # print(response) response.raise_for_status() resp_json = response.json() return resp_json def parse_data(log, resp_json, sql_pool): try: listItems = resp_json.get("mods", {}).get("listItems", []) if listItems: info_list = [] for item in listItems: title = item.get("name") item_id = item.get("itemId") images_list = item.get("thumbs", [{}]) images_list = [image.get("image") for image in images_list] images = "|".join(images_list) if images_list else "" # print(images) if not images: images = item.get("image") original_price_show = item.get("originalPriceShow") price_show = item.get("priceShow") review = item.get("review") location = item.get("location") # description = item.get("description", []) # description = "|".join(description) seller_name = item.get("sellerName") seller_id = item.get("sellerId") brand_name = item.get("brandName") brand_id = item.get("brandId") cheapest_sku = item.get("cheapest_sku") # categories = item.get("categories", []) # categories = "|".join(categories) item_sold_show = item.get("itemSoldCntShow") item_url = item.get("itemUrl") if item_url: item_url = "https:" + item_url in_stock = item.get("inStock") info_dict = { "title": title, "item_id": item_id, "images": images, "original_price_show": original_price_show, "price_show": price_show, "review": review, "location": location, "seller_name": seller_name, "seller_id": seller_id, "brand_name": brand_name, "brand_id": brand_id, "cheapest_sku": cheapest_sku, "item_sold_show": item_sold_show, "item_url": item_url, "in_stock": in_stock # 是否有货 } # print(info_dict) info_list.append(info_dict) sql_pool.insert_many(table="popmart_lazada_record", data_list=info_list) else: log.info(f"No data found") except Exception as e: log.error(f"parse_data error: {e}") @retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log) def pop_lazada_main(log): """ 主函数 :param log: logger对象 """ log.info( f'开始运行 {inspect.currentframe().f_code.co_name} 爬虫任务....................................................') # 配置 MySQL 连接池 sql_pool = MySQLConnectionPool(log=log) if not sql_pool.check_pool_health(): log.error("数据库连接池异常") raise RuntimeError("数据库连接池异常") try: # 第一次抓取 共9页 for p in range(1, 10): try: resp_json = get_resp_one_page(log, p) parse_data(log, resp_json, sql_pool) except Exception as e: log.error(f"Request get_resp_one_page page: {p}, error: {e}") time.sleep(random.uniform(1, 2)) except Exception as e: log.error(f'{inspect.currentframe().f_code.co_name} error: {e}') finally: log.info(f'爬虫程序 {inspect.currentframe().f_code.co_name} 运行结束,等待下一轮的采集任务............') def schedule_task(): """ 爬虫模块的定时任务启动文件 """ # 立即运行一次任务 # pop_lazada_main(log=logger) # 设置定时任务 # schedule.every().day.at("00:01").do(pop_lazada_main, log=logger) # schedule.every(30).minutes.do(pop_lazada_main, log=logger) schedule.every(3).hours.do(pop_lazada_main, log=logger) while True: schedule.run_pending() time.sleep(1) if __name__ == '__main__': schedule_task() # json_str = test_dict.test_dict # parse_data(logger, json_str,None)