
update 6.13.1

lei.chen 6 months ago
parent
commit
eaf8da89c7

+ 1 - 1
bgs_spider/README.md

@@ -4,6 +4,6 @@
 
 ```python
 # Start command
-python bgs_new_daily_spider.py
+python bgs_spider.py
 ```
 

+ 17 - 0
bgs_spider/add_task.py

@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Author : Charley
+# Python : 3.10.8
+# Date   : 2025/6/13 11:48
+from mysql_pool import MySQLConnectionPool
+from loguru import logger
+
+
+sql_pool = MySQLConnectionPool(log=logger)
+
+max_bgs_id = sql_pool.select_one("SELECT MAX(auth_code) AS max_number FROM bgs_task")
+# print(max_bgs_id_list)
+max_bgs_id = max_bgs_id[0]
+logger.info(f"max_bgs_id 从 {max_bgs_id} 开始爬取.........................")
+bgs_id_list = [i for i in range(max_bgs_id+1, 20000001)]
+
+sql_pool.insert_all("INSERT INTO bgs_task(auth_code) VALUES (%s)", bgs_id_list)
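
The seeding script above pushes every ID from MAX(auth_code)+1 up to 20,000,000 through a single insert_all call. For a range that large, the insert_too_many helper added in mysql_pool.py later in this commit commits in fixed-size chunks instead; a minimal sketch, using only helpers that appear in this commit (the empty-table guard is an addition, not in the original script):

```python
# Sketch only: seed bgs_task in fixed-size chunks rather than one executemany call.
from loguru import logger
from mysql_pool import MySQLConnectionPool

sql_pool = MySQLConnectionPool(log=logger)

row = sql_pool.select_one("SELECT MAX(auth_code) AS max_number FROM bgs_task")
start = (row[0] or 0) + 1  # guard against an empty table (assumption, not in the original)

ids = [(i,) for i in range(start, 20000001)]
# insert_too_many splits the work into batch_size-row executemany calls,
# keeping each statement well under MySQL's max_allowed_packet.
sql_pool.insert_too_many("INSERT INTO bgs_task(auth_code) VALUES (%s)", ids, batch_size=1000)
```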

+ 188 - 0
bgs_spider/bgs_history_spider.py

@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# Author : Charley
+# Python : 3.10.8
+# Date   : 2025/5/28 17:54
+import time
+import requests
+import schedule
+import user_agent
+from loguru import logger
+import concurrent.futures
+from tenacity import stop_after_attempt, wait_fixed, retry
+from mysql_pool import MySQLConnectionPool
+
+logger.remove()
+logger.add("logs/his_{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
+           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
+           level="DEBUG", retention="1 day")
+
+
+def after_log(retry_state):
+    """
+    Retry callback.
+    :param retry_state: RetryCallState object
+    """
+    # Check whether positional args exist and are not empty
+    if retry_state.args and len(retry_state.args) > 0:
+        log = retry_state.args[0]  # use the logger that was passed in
+    else:
+        log = logger  # fall back to the module-level logger
+
+    if retry_state.outcome.failed:
+        log.warning(
+            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
+    else:
+        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")
+
+
+@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
+def get_proxys(log):
+    tunnel = "x371.kdltps.com:15818"
+    kdl_username = "t13753103189895"
+    kdl_password = "o0yefv6z"
+    try:
+        proxies = {
+            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
+            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
+        }
+        return proxies
+    except Exception as e:
+        log.error(f"Error getting proxy: {e}")
+        raise e
+
+
+def save_data(mysql_pool, info):
+    """
+    :param mysql_pool:
+    :param info:
+    :return:
+    """
+    sql = "INSERT INTO beckett_bgs_record(set_name, player_name, date_graded, centering_grade, corner_grade, edges_grade, surfaces_grade, auto_grade, final_grade, total_grade, cards_grade, number) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
+    mysql_pool.insert_one(sql, info)
+
+
+@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
+def get_data(log, bgs_id, mysql_pool):
+    """
+    :param log:
+    :param bgs_id:
+    :param mysql_pool:
+    :return:
+    """
+    headers = {
+        "accept": "application/json, text/plain, */*",
+        "user-agent": user_agent.generate_user_agent()
+    }
+    url = "https://www.beckett.com/api/grading/lookup"
+    params = {
+        "category": "BGS",
+        "serialNumber": str(bgs_id)
+    }
+    response = requests.get(url, headers=headers, params=params, proxies=get_proxys(log), timeout=5)
+    if response.status_code == 404:
+        # No record found: set state to 3
+        log.warning(f"No Record Found for {bgs_id}")
+        mysql_pool.update_one("UPDATE bgs_task SET state=3 WHERE auth_code=%s", (bgs_id,))
+        return
+
+    if response.status_code != 200:
+        # Lookup failed: set state to 2
+        log.warning(f"Error getting data for {bgs_id}, {response.status_code}")
+        mysql_pool.update_one("UPDATE bgs_task SET state=2 WHERE auth_code=%s", (bgs_id,))
+        return
+
+    # print(response.json())
+    result_dict = response.json()
+    if result_dict:
+        set_name = result_dict.get('set_name')
+        player_name = result_dict.get('player_name')
+        date_graded = result_dict.get('date_graded')
+        centering_grade = result_dict.get('center_grade')
+        corner_grade = result_dict.get('corners_grade')
+        edges_grade = result_dict.get('edges_grade')
+        surfaces_grade = result_dict.get('surface_grade')
+        auto_grade = result_dict.get('autograph_grade')
+        final_grade = result_dict.get('final_grade')
+        total_grade = result_dict.get('pop_report')
+        cards_grade = result_dict.get('pop_higher')
+        info = (set_name, player_name, date_graded, centering_grade, corner_grade, edges_grade, surfaces_grade,
+                auto_grade, final_grade, total_grade, cards_grade, int(bgs_id))
+
+        # Check whether every value (excluding bgs_id) is None or an empty string
+        all_none_or_empty = all(x is None or x == '' for x in info[:-1])
+        if all_none_or_empty:
+            log.debug("All values are empty")
+        else:
+            # print(info)
+            save_data(mysql_pool, info)
+            # Lookup succeeded: set state to 1
+            mysql_pool.update_one("UPDATE bgs_task SET state=1 WHERE auth_code=%s", (bgs_id,))
+
+
+def process_urls(log, ids, mysql_pool, batch_size=1000, max_workers=5):
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+        for i in range(0, len(ids), batch_size):
+            # print(i)
+            batch = ids[i:i + batch_size]
+            # print(batch)
+            try:
+                futures_to_urls = {executor.submit(get_data, log, url, mysql_pool): url for url in batch}
+                for future in concurrent.futures.as_completed(futures_to_urls):
+                    url = futures_to_urls[future]
+                    try:
+                        future.result()
+                        log.debug(f"处理 {url} 成功")
+                    except Exception as exc:
+                        log.debug(f"处理 {url} 出错: {exc}")
+            except Exception as e:
+                log.error(f"提交任务失败: {e}")
+
+
+@retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log)
+def bgs_main(log):
+    try:
+        log.info(
+            "开始运行 bgs_main 爬虫任务............................................................")
+        sql_pool = MySQLConnectionPool(log=log)
+        if not sql_pool:
+            log.error("数据库连接失败")
+            raise Exception("数据库连接失败")
+
+        while True:
+            sql_bgs_id_list = sql_pool.select_all(
+                "SELECT auth_code FROM bgs_task WHERE state in (2,3,4) LIMIT 10000")
+            sql_bgs_id_list = [bid[0] for bid in sql_bgs_id_list]
+            # for bid in sql_bgs_id_list:
+            if not sql_bgs_id_list:
+                log.info("没有需要处理的数据")
+                break
+            try:
+                process_urls(log, sql_bgs_id_list, sql_pool, batch_size=1000,
+                             max_workers=10)  # adjust batch_size and max_workers as needed
+                # get_data(bid, mysql_pool)
+            except Exception as e:
+                log.error(f'process urls: {e}')
+
+    except Exception as e:
+        log.error(e)
+    finally:
+        log.info("爬虫程序运行结束,等待下一轮的采集任务.....................")
+
+
+def schedule_task():
+    """
+    Set up the scheduled task.
+    """
+    # Run the task once immediately
+    bgs_main(logger)
+
+    # Schedule the recurring task
+    schedule.every().day.at("03:01").do(bgs_main, logger)
+    while True:
+        schedule.run_pending()
+        time.sleep(1)
+
+
+if __name__ == '__main__':
+    schedule_task()
+    # get_data('1000743')
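
Note that get_data now takes (log, bgs_id, mysql_pool), so the stale single-ID call in the comment above no longer matches its signature. A minimal sketch of a one-off lookup, assuming it runs inside this module where get_data is defined:

```python
# Sketch only: look up one BGS serial by hand instead of running the scheduler.
# Assumes this lives in bgs_history_spider.py, where get_data is defined.
from loguru import logger
from mysql_pool import MySQLConnectionPool

if __name__ == '__main__':
    pool = MySQLConnectionPool(log=logger)
    # bgs_task.state ends up as 1 (record saved), 2 (HTTP error) or 3 (no record found)
    get_data(logger, '1000743', pool)
```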

+ 154 - 0
bgs_spider/bgs_lack_add.py

@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+# Author : Charley
+# Python : 3.10.8
+# Date   : 2025/5/28 15:35
+import inspect
+import requests
+import user_agent
+from loguru import logger
+import concurrent.futures
+from tenacity import stop_after_attempt, wait_fixed, retry
+from mysql_pool import MySQLConnectionPool
+
+logger.remove()
+logger.add("logs/add_{time:YYYYMMDD}.log", encoding='utf-8', rotation="00:00",
+           format="[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level} {message}",
+           level="DEBUG", retention="1 day")
+
+
+def after_log(retry_state):
+    """
+    Retry callback.
+    :param retry_state: RetryCallState object
+    """
+    # Check whether positional args exist and are not empty
+    if retry_state.args and len(retry_state.args) > 0:
+        log = retry_state.args[0]  # use the logger that was passed in
+    else:
+        log = logger  # fall back to the module-level logger
+
+    if retry_state.outcome.failed:
+        log.warning(
+            f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} failed")
+    else:
+        log.info(f"Function '{retry_state.fn.__name__}', attempt {retry_state.attempt_number} succeeded")
+
+
+@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
+def get_proxys(log):
+    tunnel = "x371.kdltps.com:15818"
+    kdl_username = "t13753103189895"
+    kdl_password = "o0yefv6z"
+    try:
+        proxies = {
+            "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel},
+            "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": kdl_username, "pwd": kdl_password, "proxy": tunnel}
+        }
+        return proxies
+    except Exception as e:
+        log.error(f"Error getting proxy: {e}")
+        raise e
+
+
+@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), after=after_log)
+def get_data(log, bgs_id, sql_pool):
+    """
+    :param log:
+    :param bgs_id:
+    :param sql_pool:
+    :return:
+    """
+    headers = {
+        "accept": "application/json, text/plain, */*",
+        "user-agent": user_agent.generate_user_agent()
+    }
+    url = "https://www.beckett.com/api/grading/lookup"
+    params = {
+        "category": "BGS",
+        "serialNumber": str(bgs_id)
+    }
+    response = requests.get(url, headers=headers, params=params, proxies=get_proxys(log), timeout=5)
+    if response.status_code == 404:
+        # No record found: set state to 3
+        log.warning(f"No Record Found for {bgs_id}")
+        sql_pool.update_one("UPDATE bgs_task SET state=3 WHERE auth_code=%s", (bgs_id,))
+        return
+
+    if response.status_code != 200:
+        # Lookup failed: set state to 2
+        log.warning(f"Error getting data for {bgs_id}, {response.status_code}")
+        sql_pool.update_one("UPDATE bgs_task SET state=2 WHERE auth_code=%s", (bgs_id,))
+        return
+
+    result_dict = response.json()
+    if result_dict:
+        centering_grade = result_dict.get('center_grade')
+        corner_grade = result_dict.get('corners_grade')
+        edges_grade = result_dict.get('edges_grade')
+        surfaces_grade = result_dict.get('surface_grade')
+
+        total_grade = result_dict.get('pop_report')
+        cards_grade = result_dict.get('pop_higher')
+        info = (centering_grade, corner_grade, edges_grade, surfaces_grade, total_grade, cards_grade, int(bgs_id))
+
+        # Check whether every value (excluding bgs_id) is None or an empty string
+        all_none_or_empty = all(x is None or x == '' for x in info[:-1])
+        if all_none_or_empty:
+            log.debug("All values are empty")
+        else:
+            # print(info)
+            sql_pool.update_one(
+                "UPDATE beckett_bgs_record SET centering_grade=%s, corner_grade=%s, edges_grade=%s ,surfaces_grade=%s, total_grade=%s, cards_grade=%s WHERE number=%s",
+                info)
+
+            # Lookup succeeded: set state to 1
+            sql_pool.update_one("UPDATE bgs_task SET state=1 WHERE auth_code=%s", (bgs_id,))
+
+
+def process_urls(log, ids, mysql_pool, batch_size=1000, max_workers=5):
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+        for i in range(0, len(ids), batch_size):
+            # print(i)
+            batch = ids[i:i + batch_size]
+            # print(batch)
+            try:
+                futures_to_urls = {executor.submit(get_data, log, url, mysql_pool): url for url in batch}
+                for future in concurrent.futures.as_completed(futures_to_urls):
+                    url = futures_to_urls[future]
+                    try:
+                        future.result()
+                        log.debug(f"处理 {url} 成功")
+                    except Exception as exc:
+                        log.debug(f"处理 {url} 出错: {exc}")
+            except Exception as e:
+                log.error(f"提交任务失败: {e}")
+
+
+@retry(stop=stop_after_attempt(100), wait=wait_fixed(3600), after=after_log)
+def bgs_add_main(log):
+    try:
+        log.info(f'Starting the {inspect.currentframe().f_code.co_name} spider task.........................................')
+
+        sql_pool = MySQLConnectionPool(log=log)
+        if not sql_pool:
+            log.error("数据库连接失败")
+            raise Exception("数据库连接失败")
+
+        # Backfill abnormal records left by an earlier API change
+        sql = "SELECT number FROM beckett_bgs_record WHERE centering_grade IS NULL AND corner_grade IS NULL AND surfaces_grade IS NULL;"
+        sql_bgs_id_list = sql_pool.select_all(sql)
+        sql_bgs_id_list = [bid[0] for bid in sql_bgs_id_list]
+        try:
+            process_urls(log, sql_bgs_id_list, sql_pool, batch_size=1000,
+                         max_workers=10)  # adjust batch_size and max_workers as needed
+        except Exception as e:
+            log.error(f'process urls: {e}')
+
+    except Exception as e:
+        log.error(e)
+    finally:
+        log.info(f'Spider {inspect.currentframe().f_code.co_name} finished, waiting for the next round of collection............')
+
+
+if __name__ == '__main__':
+    bgs_add_main(logger)
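
Both spiders define the same after_log callback and rely on tenacity passing the decorated call's positional arguments through retry_state, which is why the per-call logger is recovered from retry_state.args[0]. A minimal standalone sketch of that mechanism; the flaky function is purely illustrative:

```python
# Sketch only: how tenacity's `after` callback sees the decorated call's arguments.
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_fixed

def after_log(retry_state):
    # retry_state.args holds the positional args of the wrapped call,
    # so a function declared as f(log, ...) exposes its logger here.
    log = retry_state.args[0] if retry_state.args else logger
    if retry_state.outcome.failed:
        log.warning(f"{retry_state.fn.__name__} attempt {retry_state.attempt_number} failed")

@retry(stop=stop_after_attempt(3), wait=wait_fixed(0), after=after_log)
def flaky(log, n):
    raise RuntimeError("boom")  # always fails, so after_log fires on every attempt

if __name__ == '__main__':
    try:
        flaky(logger, 1)
    except Exception:
        pass  # tenacity raises RetryError once the attempts are exhausted
```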

+ 9 - 9
bgs_spider/bgs_new_daily_spider.py

@@ -164,17 +164,17 @@ def bgs_main(log):
             log.error("数据库连接失败")
             raise Exception("数据库连接失败")
 
-        max_bgs_id = sql_pool.select_one("SELECT MAX(number) AS max_number FROM beckett_bgs_record")
-        # print(max_bgs_id_list)
-        max_bgs_id = max_bgs_id[0]
-        log.info(f"max_bgs_id 从 {max_bgs_id} 开始爬取.........................")
-        bgs_id_list = [i for i in range(max_bgs_id, max_bgs_id + 3001)]
-
-        sql_pool.insert_all("INSERT INTO bgs_task(auth_code) VALUES (%s)", bgs_id_list)
+        # max_bgs_id = sql_pool.select_one("SELECT MAX(number) AS max_number FROM beckett_bgs_record")
+        # # print(max_bgs_id_list)
+        # max_bgs_id = max_bgs_id[0]
+        # log.info(f"max_bgs_id 从 {max_bgs_id} 开始爬取.........................")
+        # bgs_id_list = [i for i in range(max_bgs_id, max_bgs_id + 3001)]
+        #
+        # sql_pool.insert_all("INSERT INTO bgs_task(auth_code) VALUES (%s)", bgs_id_list)
 
         # Query 5,000 rows in reverse order
         sql_bgs_id_list = sql_pool.select_all(
-            "SELECT auth_code FROM bgs_task WHERE state!=1 ORDER BY id DESC LIMIT 5000")
+            "SELECT auth_code FROM bgs_task WHERE state!=1 AND id > 17990000 LIMIT 5000")
         sql_bgs_id_list = [bid[0] for bid in sql_bgs_id_list]
         # for bid in sql_bgs_id_list:
         try:
@@ -195,7 +195,7 @@ def schedule_task():
     Set up the scheduled task.
     """
     # Run the task once immediately
-    # bgs_main(logger)
+    bgs_main(logger)
 
     # Schedule the recurring task
     schedule.every().day.at("03:01").do(bgs_main, logger)

+ 531 - 0
bgs_spider/mysql_pool.py

@@ -0,0 +1,531 @@
+# -*- coding: utf-8 -*-
+# Author : Charley
+# Python : 3.10.8
+# Date   : 2025/3/25 14:14
+import re
+import pymysql
+import YamlLoader
+from loguru import logger
+from dbutils.pooled_db import PooledDB
+
+# Load the YAML configuration
+yaml = YamlLoader.readYaml()
+mysqlYaml = yaml.get("mysql")
+sql_host = mysqlYaml.getValueAsString("host")
+sql_port = mysqlYaml.getValueAsInt("port")
+sql_user = mysqlYaml.getValueAsString("username")
+sql_password = mysqlYaml.getValueAsString("password")
+sql_db = mysqlYaml.getValueAsString("db")
+
+
+class MySQLConnectionPool:
+    """
+    MySQL connection pool.
+    """
+
+    def __init__(self, mincached=4, maxcached=5, maxconnections=10, log=None):
+        """
+        Initialize the connection pool.
+        :param mincached: number of idle connections created at start-up (0 = create none)
+        :param maxcached: maximum number of idle connections kept in the pool (0 or None = unlimited)
+        :param maxconnections: maximum number of connections allowed (0 or None = unlimited)
+        :param log: custom logger
+        """
+        # Use loguru's logger by default; if another logger is passed in, use that one
+        self.log = log or logger
+        self.pool = PooledDB(
+            creator=pymysql,
+            mincached=mincached,
+            maxcached=maxcached,
+            maxconnections=maxconnections,
+            blocking=True,  # whether to block and wait when no connection is available: True = wait, False = raise an error immediately
+            host=sql_host,
+            port=sql_port,
+            user=sql_user,
+            password=sql_password,
+            database=sql_db,
+            ping=0  # connection validity check (0 = never, 1 = whenever fetched from the pool, 2 = when a cursor is created, 4 = when a query is executed)
+        )
+
+    def _execute(self, query, args=None, commit=False):
+        """
+        Execute a SQL statement.
+        :param query: SQL statement
+        :param args: SQL parameters
+        :param commit: whether to commit the transaction
+        :return: the cursor
+        """
+        try:
+            with self.pool.connection() as conn:
+                with conn.cursor() as cursor:
+                    cursor.execute(query, args)
+                    if commit:
+                        conn.commit()
+                    self.log.debug(f"sql _execute, Query: {query}, Rows: {cursor.rowcount}")
+                    return cursor
+        except Exception as e:
+            if commit:
+                conn.rollback()
+            self.log.error(f"Error executing query: {e}, Query: {query}, Args: {args}")
+            raise e
+
+    def select_one(self, query, args=None):
+        """
+        Run a query and return a single row.
+        :param query: query statement
+        :param args: query parameters
+        :return: the first row of the result
+        """
+        cursor = self._execute(query, args)
+        return cursor.fetchone()
+
+    def select_all(self, query, args=None):
+        """
+        Run a query and return all rows.
+        :param query: query statement
+        :param args: query parameters
+        :return: all rows of the result
+        """
+        cursor = self._execute(query, args)
+        return cursor.fetchall()
+
+    def insert_one(self, query, args):
+        """
+        Execute a single INSERT statement.
+        :param query: insert statement
+        :param args: insert parameters
+        """
+        self.log.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> insert_one: writing data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+        cursor = self._execute(query, args, commit=True)
+        return cursor.lastrowid  # return the ID of the inserted row
+
+    def insert_all(self, query, args_list):
+        """
+        Execute a batch INSERT; fall back to row-by-row inserts on failure.
+        :param query: insert statement
+        :param args_list: list of insert parameters
+        """
+        conn = None
+        cursor = None
+        try:
+            conn = self.pool.connection()
+            cursor = conn.cursor()
+            cursor.executemany(query, args_list)
+            conn.commit()
+            self.log.debug(f"sql insert_all, SQL: {query}, Rows: {len(args_list)}")
+            self.log.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> insert_all: writing data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+        except Exception as e:
+            conn.rollback()
+            self.log.error(f"Batch insertion failed after 5 attempts. Trying single inserts. Error: {e}")
+            # 如果批量插入失败,则逐条插入
+            rowcount = 0
+            for args in args_list:
+                self.insert_one(query, args)
+                rowcount += 1
+            self.log.debug(f"Batch insertion failed. Inserted {rowcount} rows individually.")
+        finally:
+            if cursor:
+                cursor.close()
+            if conn:
+                conn.close()
+
+    def insert_one_or_dict(self, table=None, data=None, query=None, args=None, commit=True):
+        """
+        Single-row insert (accepts a dict or raw SQL).
+        :param table: table name (required for dict inserts)
+        :param data: dict of {column: value}
+        :param query: raw SQL statement (mutually exclusive with data)
+        :param args: SQL parameters (required when query is used)
+        :param commit: whether to auto-commit
+        :return: last inserted ID
+        """
+        if data is not None:
+            if not isinstance(data, dict):
+                raise ValueError("Data must be a dictionary")
+
+            keys = ', '.join([self._safe_identifier(k) for k in data.keys()])
+            values = ', '.join(['%s'] * len(data))
+            query = f"INSERT INTO {self._safe_identifier(table)} ({keys}) VALUES ({values})"
+            args = tuple(data.values())
+        elif query is None:
+            raise ValueError("Either data or query must be provided")
+
+        cursor = self._execute(query, args, commit)
+        self.log.info(f"sql insert_one_or_dict, Table: {table}, Rows: {cursor.rowcount}")
+        self.log.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> insert_one_or_dict: writing data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+        return cursor.lastrowid
+
+    def insert_many(self, table=None, data_list=None, query=None, args_list=None, batch_size=500, commit=True):
+        """
+        Batch insert (accepts a list of dicts or raw SQL).
+        :param table: table name (required for dict inserts)
+        :param data_list: list of dicts [{column: value}]
+        :param query: raw SQL statement (mutually exclusive with data_list)
+        :param args_list: list of SQL parameters (required when query is used)
+        :param batch_size: batch size
+        :param commit: whether to auto-commit
+        :return: number of affected rows
+        """
+        if data_list is not None:
+            if not data_list or not isinstance(data_list[0], dict):
+                raise ValueError("Data_list must be a non-empty list of dictionaries")
+
+            keys = ', '.join([self._safe_identifier(k) for k in data_list[0].keys()])
+            values = ', '.join(['%s'] * len(data_list[0]))
+            query = f"INSERT INTO {self._safe_identifier(table)} ({keys}) VALUES ({values})"
+            args_list = [tuple(d.values()) for d in data_list]
+        elif query is None:
+            raise ValueError("Either data_list or query must be provided")
+
+        total = 0
+        for i in range(0, len(args_list), batch_size):
+            batch = args_list[i:i + batch_size]
+            try:
+                with self.pool.connection() as conn:
+                    with conn.cursor() as cursor:
+                        cursor.executemany(query, batch)
+                        if commit:
+                            conn.commit()
+                        total += cursor.rowcount
+            except pymysql.Error as e:
+                if "Duplicate entry" in str(e):
+                    # self.log.warning(f"检测到重复条目,开始逐条插入。错误详情: {e}")
+                    raise  e
+                    # rowcount = 0
+                    # for args in batch:
+                    #     try:
+                    #         self.insert_one_or_dict(table=table, data=dict(zip(data_list[0].keys(), args)),
+                    #                                 commit=commit)
+                    #         rowcount += 1
+                    #     except pymysql.err.IntegrityError as e2:
+                    #         if "Duplicate entry" in str(e2):
+                    #             self.log.warning(f"Skipping duplicate entry: {args}")
+                    #         else:
+                    #             self.log.error(f"Insert failed: {e2}, args: {args}")
+                    # total += rowcount
+                else:
+                    self.log.error(f"数据库错误: {e}")
+                    if commit:
+                        conn.rollback()
+                    raise e
+                # Re-raise the exception so the caller can handle it
+                # Fall back to single-row inserts
+                # for args in batch:
+                #     try:
+                #         self.insert_one_or_dict(table=None, query=query, args=args, commit=commit)
+                #         total += 1
+                #     except Exception as e2:
+                #         self.log.error(f"Single insert failed: {e2}")
+                        # continue
+        self.log.info(f"sql insert_many, Table: {table}, Total Rows: {total}")
+        return total
+
+    def insert_many_two(self, table=None, data_list=None, query=None, args_list=None, batch_size=500, commit=True):
+        """
+        Batch insert (accepts a list of dicts or raw SQL).
+        :param table: table name (required for dict inserts)
+        :param data_list: list of dicts [{column: value}]
+        :param query: raw SQL statement (mutually exclusive with data_list)
+        :param args_list: list of SQL parameters (required when query is used)
+        :param batch_size: batch size
+        :param commit: whether to auto-commit
+        :return: number of affected rows
+        """
+        if data_list is not None:
+            if not data_list or not isinstance(data_list[0], dict):
+                raise ValueError("Data_list must be a non-empty list of dictionaries")
+            keys = ', '.join([self._safe_identifier(k) for k in data_list[0].keys()])
+            values = ', '.join(['%s'] * len(data_list[0]))
+            query = f"INSERT INTO {self._safe_identifier(table)} ({keys}) VALUES ({values})"
+            args_list = [tuple(d.values()) for d in data_list]
+        elif query is None:
+            raise ValueError("Either data_list or query must be provided")
+
+        total = 0
+        for i in range(0, len(args_list), batch_size):
+            batch = args_list[i:i + batch_size]
+            try:
+                with self.pool.connection() as conn:
+                    with conn.cursor() as cursor:
+                        # Debug logging: print the SQL and a sample of the args
+                        # self.log.debug(f"Batch insert SQL: {query}")
+                        # self.log.debug(f"Sample args: {batch[0] if batch else 'None'}")
+                        cursor.executemany(query, batch)
+                        if commit:
+                            conn.commit()
+                        total += cursor.rowcount
+                        # self.log.debug(f"Batch insert succeeded. Rows: {cursor.rowcount}")
+            except Exception as e:  # catch database exceptions explicitly
+                self.log.exception(f"Batch insert failed: {e}")  # log the stack trace via exception()
+                self.log.error(f"Failed SQL: {query}, Args count: {len(batch)}")
+                if commit:
+                    conn.rollback()
+                # Fall back to single-row inserts and log each error
+                rowcount = 0
+                for args in batch:
+                    try:
+                        self.insert_one(query, args)
+                        rowcount += 1
+                    except Exception as e2:
+                        self.log.error(f"Single insert failed: {e2}, Args: {args}")
+                total += rowcount
+                self.log.debug(f"Inserted {rowcount}/{len(batch)} rows individually.")
+        self.log.info(f"sql insert_many, Table: {table}, Total Rows: {total}")
+        return total
+
+    def insert_too_many(self, query, args_list, batch_size=1000):
+        """
+        Batch insert with chunked commits, intended for single inserts of 100k+ rows; falls back to row-by-row inserts on failure.
+        :param query: insert statement
+        :param args_list: list of insert parameters
+        :param batch_size: number of rows per chunk
+        """
+        for i in range(0, len(args_list), batch_size):
+            batch = args_list[i:i + batch_size]
+            try:
+                with self.pool.connection() as conn:
+                    with conn.cursor() as cursor:
+                        cursor.executemany(query, batch)
+                        conn.commit()
+            except Exception as e:
+                self.log.error(f"insert_too_many error. Trying single insert. Error: {e}")
+                # Fall back to single-row inserts for this batch
+                for args in batch:
+                    self.insert_one(query, args)
+
+    def update_one(self, query, args):
+        """
+        Execute a single UPDATE statement.
+        :param query: update statement
+        :param args: update parameters
+        """
+        self.log.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> update_one: updating data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+        return self._execute(query, args, commit=True)
+
+    def update_all(self, query, args_list):
+        """
+        Execute a batch UPDATE; fall back to row-by-row updates on failure.
+        :param query: update statement
+        :param args_list: list of update parameters
+        """
+        conn = None
+        cursor = None
+        try:
+            conn = self.pool.connection()
+            cursor = conn.cursor()
+            cursor.executemany(query, args_list)
+            conn.commit()
+            self.log.debug(f"sql update_all, SQL: {query}, Rows: {len(args_list)}")
+            self.log.info('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> update_all: updating data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+        except Exception as e:
+            conn.rollback()
+            self.log.error(f"Error executing query: {e}")
+            # If the batch update fails, update row by row
+            rowcount = 0
+            for args in args_list:
+                self.update_one(query, args)
+                rowcount += 1
+            self.log.debug(f'Batch update failed. Updated {rowcount} rows individually.')
+        finally:
+            if cursor:
+                cursor.close()
+            if conn:
+                conn.close()
+
+    def update_one_or_dict(self, table=None, data=None, condition=None, query=None, args=None, commit=True):
+        """
+        Single-row update (accepts a dict or raw SQL).
+        :param table: table name (required in dict mode)
+        :param data: dict of {column: value} (mutually exclusive with query)
+        :param condition: update condition, accepted formats:
+            - dict: {"id": 1} → "WHERE id = %s"
+            - string: "id = 1" → "WHERE id = 1" (caller must ensure it is safe)
+            - tuple: ("id = %s", [1]) → "WHERE id = %s" (parameterized query)
+        :param query: raw SQL statement (mutually exclusive with data)
+        :param args: SQL parameters (required in query mode)
+        :param commit: whether to auto-commit
+        :return: number of affected rows
+        :raises: ValueError if parameter validation fails
+        """
+        # Parameter validation
+        if data is not None:
+            if not isinstance(data, dict):
+                raise ValueError("Data must be a dictionary")
+            if table is None:
+                raise ValueError("Table name is required for dictionary update")
+            if condition is None:
+                raise ValueError("Condition is required for dictionary update")
+
+            # Build the SET clause
+            set_clause = ", ".join([f"{self._safe_identifier(k)} = %s" for k in data.keys()])
+            set_values = list(data.values())
+
+            # Parse the condition
+            condition_clause, condition_args = self._parse_condition(condition)
+            query = f"UPDATE {self._safe_identifier(table)} SET {set_clause} WHERE {condition_clause}"
+            args = set_values + condition_args
+
+        elif query is None:
+            raise ValueError("Either data or query must be provided")
+
+        # Execute the update
+        cursor = self._execute(query, args, commit)
+        # self.log.debug(
+        #     f"Updated table={table}, rows={cursor.rowcount}, query={query[:100]}...",
+        #     extra={"table": table, "rows": cursor.rowcount}
+        # )
+        return cursor.rowcount
+
+    def _parse_condition(self, condition):
+        """
+        Parse a condition into (clause, args) form.
+        :param condition: dict / string / tuple
+        :return: (str, list) SQL clause and parameter list
+        """
+        if isinstance(condition, dict):
+            clause = " AND ".join([f"{self._safe_identifier(k)} = %s" for k in condition.keys()])
+            args = list(condition.values())
+        elif isinstance(condition, str):
+            clause = condition  # note: the caller must ensure this is safe
+            args = []
+        elif isinstance(condition, (tuple, list)) and len(condition) == 2:
+            clause, args = condition[0], condition[1]
+            if not isinstance(args, (list, tuple)):
+                args = [args]
+        else:
+            raise ValueError("Condition must be dict/str/(clause, args)")
+        return clause, args
+
+    def update_many(self, table=None, data_list=None, condition_list=None, query=None, args_list=None, batch_size=500,
+                    commit=True):
+        """
+        Batch update (accepts a list of dicts or raw SQL).
+        :param table: table name (required for dict updates)
+        :param data_list: list of dicts [{column: value}]
+        :param condition_list: list of conditions (must be dicts, same length as data_list)
+        :param query: raw SQL statement (mutually exclusive with data_list)
+        :param args_list: list of SQL parameters (required when query is used)
+        :param batch_size: batch size
+        :param commit: whether to auto-commit
+        :return: number of affected rows
+        """
+        if data_list is not None:
+            if not data_list or not isinstance(data_list[0], dict):
+                raise ValueError("Data_list must be a non-empty list of dictionaries")
+            if condition_list is None or len(data_list) != len(condition_list):
+                raise ValueError("Condition_list must be provided and match the length of data_list")
+            if not all(isinstance(cond, dict) for cond in condition_list):
+                raise ValueError("All elements in condition_list must be dictionaries")
+
+            # Capture the key sets of the first data item and the first condition item
+            first_data_keys = set(data_list[0].keys())
+            first_cond_keys = set(condition_list[0].keys())
+
+            # Build the base SQL
+            set_clause = ', '.join([self._safe_identifier(k) + ' = %s' for k in data_list[0].keys()])
+            condition_clause = ' AND '.join([self._safe_identifier(k) + ' = %s' for k in condition_list[0].keys()])
+            base_query = f"UPDATE {self._safe_identifier(table)} SET {set_clause} WHERE {condition_clause}"
+            total = 0
+
+            # Process in batches
+            for i in range(0, len(data_list), batch_size):
+                batch_data = data_list[i:i + batch_size]
+                batch_conds = condition_list[i:i + batch_size]
+                batch_args = []
+
+                # Check that the current batch has a consistent structure
+                can_batch = True
+                for data, cond in zip(batch_data, batch_conds):
+                    data_keys = set(data.keys())
+                    cond_keys = set(cond.keys())
+                    if data_keys != first_data_keys or cond_keys != first_cond_keys:
+                        can_batch = False
+                        break
+                    batch_args.append(tuple(data.values()) + tuple(cond.values()))
+
+                if not can_batch:
+                    # Inconsistent structure: fall back to single-row updates
+                    for data, cond in zip(batch_data, batch_conds):
+                        self.update_one_or_dict(table=table, data=data, condition=cond, commit=commit)
+                        total += 1
+                    continue
+
+                # Execute the batch update
+                try:
+                    with self.pool.connection() as conn:
+                        with conn.cursor() as cursor:
+                            cursor.executemany(base_query, batch_args)
+                            if commit:
+                                conn.commit()
+                            total += cursor.rowcount
+                            self.log.debug(f"Batch update succeeded. Rows: {cursor.rowcount}")
+                except Exception as e:
+                    if commit:
+                        conn.rollback()
+                    self.log.error(f"Batch update failed: {e}")
+                    # Fall back to single-row updates
+                    for args, data, cond in zip(batch_args, batch_data, batch_conds):
+                        try:
+                            self._execute(base_query, args, commit=commit)
+                            total += 1
+                        except Exception as e2:
+                            self.log.error(f"Single update failed: {e2}, Data: {data}, Condition: {cond}")
+            self.log.info(f"Total updated rows: {total}")
+            return total
+        elif query is not None:
+            # Handle raw SQL with a parameter list
+            if args_list is None:
+                raise ValueError("args_list must be provided when using query")
+
+            total = 0
+            for i in range(0, len(args_list), batch_size):
+                batch_args = args_list[i:i + batch_size]
+                try:
+                    with self.pool.connection() as conn:
+                        with conn.cursor() as cursor:
+                            cursor.executemany(query, batch_args)
+                            if commit:
+                                conn.commit()
+                            total += cursor.rowcount
+                            self.log.debug(f"Batch update succeeded. Rows: {cursor.rowcount}")
+                except Exception as e:
+                    if commit:
+                        conn.rollback()
+                    self.log.error(f"Batch update failed: {e}")
+                    # Fall back to single-row updates
+                    for args in batch_args:
+                        try:
+                            self._execute(query, args, commit=commit)
+                            total += 1
+                        except Exception as e2:
+                            self.log.error(f"Single update failed: {e2}, Args: {args}")
+            self.log.info(f"Total updated rows: {total}")
+            return total
+        else:
+            raise ValueError("Either data_list or query must be provided")
+
+    def check_pool_health(self):
+        """
+        Check that the pool can hand out a working connection.
+
+        # Usage example
+        # Configure the MySQL connection pool
+        sql_pool = MySQLConnectionPool(log=log)
+        if not sql_pool.check_pool_health():
+            log.error("Connection pool is unhealthy")
+            raise RuntimeError("Connection pool is unhealthy")
+        """
+        try:
+            with self.pool.connection() as conn:
+                conn.ping(reconnect=True)
+                return True
+        except Exception as e:
+            self.log.error(f"Connection pool health check failed: {e}")
+            return False
+
+    @staticmethod
+    def _safe_identifier(name):
+        """SQL标识符安全校验"""
+        if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
+            raise ValueError(f"Invalid SQL identifier: {name}")
+        return name
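
Besides the raw-SQL helpers the spiders call, the pool exposes dict-based helpers (insert_one_or_dict, update_one_or_dict, insert_many, update_many) that build the statement from column names and validate identifiers via _safe_identifier. A minimal usage sketch; the table and column names are placeholders, not tables from this repository:

```python
# Sketch only: the dict-based helpers. Table and column names below are illustrative.
from loguru import logger
from mysql_pool import MySQLConnectionPool

pool = MySQLConnectionPool(log=logger)

# Builds: INSERT INTO demo_cards (card_name, grade) VALUES (%s, %s)
new_id = pool.insert_one_or_dict(table="demo_cards",
                                 data={"card_name": "example", "grade": 9.5})

# Builds: UPDATE demo_cards SET grade = %s WHERE id = %s
# The condition may be a dict, a raw string, or a ("clause", args) tuple.
pool.update_one_or_dict(table="demo_cards",
                        data={"grade": 10},
                        condition={"id": new_id})
```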

+ 7 - 7
bgs_spider/requirements.txt

@@ -1,10 +1,10 @@
 -i https://mirrors.aliyun.com/pypi/simple/
-DBUtils==3.1.0
-loguru==0.7.2
-parsel==1.9.1
-PyMySQL==1.1.1
-PyYAML==6.0.1
-Requests==2.32.3
-user_agent==0.1.10
+DBUtils
+loguru
+parsel
+PyMySQL
+PyYAML
+Requests
+user_agent
 tenacity
 schedule

+ 155 - 0
bgs_spider/spider_setting.py

@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+# Author : Charley
+# Python : 3.10.8
+# Date   : 2025/1/24 18:58
+manufacturer_list = [
+    {'manufacturer_value': '452970', 'manufacturer_name': 'Panini Group', 'manufacturer_filter_data': '1787965'},
+    {'manufacturer_value': '186423', 'manufacturer_name': 'Topps Co.', 'manufacturer_filter_data': '290968'},
+    {'manufacturer_value': '223464', 'manufacturer_name': 'Upper Deck Co.', 'manufacturer_filter_data': '210584'},
+    {'manufacturer_value': '223465', 'manufacturer_name': 'Fleer Inc.', 'manufacturer_filter_data': '99396'},
+    {'manufacturer_value': '506409', 'manufacturer_name': 'Leaf Trading Cards', 'manufacturer_filter_data': '83144'},
+    {'manufacturer_value': '451900', 'manufacturer_name': 'Wild Card', 'manufacturer_filter_data': '13701'},
+    {'manufacturer_value': '223640', 'manufacturer_name': 'Press Pass Inc.', 'manufacturer_filter_data': '9583'},
+    {'manufacturer_value': '223639', 'manufacturer_name': 'SAGE', 'manufacturer_filter_data': '5060'},
+    {'manufacturer_value': '451893', 'manufacturer_name': 'SkyBox', 'manufacturer_filter_data': '4857'},
+    {'manufacturer_value': '451891', 'manufacturer_name': 'Score Board', 'manufacturer_filter_data': '3647'},
+    {'manufacturer_value': '476405', 'manufacturer_name': 'Classic/Scoreboard', 'manufacturer_filter_data': '2176'},
+    {'manufacturer_value': '451889', 'manufacturer_name': 'Rittenhouse', 'manufacturer_filter_data': '2129'},
+    {'manufacturer_value': '451894', 'manufacturer_name': 'Star Co.', 'manufacturer_filter_data': '1824'},
+    {'manufacturer_value': '451650', 'manufacturer_name': "Collector's Edge", 'manufacturer_filter_data': '1609'},
+    {'manufacturer_value': '451875', 'manufacturer_name': 'Front Row', 'manufacturer_filter_data': '1156'},
+    {'manufacturer_value': '451649', 'manufacturer_name': 'Donruss/Playoff', 'manufacturer_filter_data': '1024'},
+    {'manufacturer_value': '367292', 'manufacturer_name': 'Kenner', 'manufacturer_filter_data': '968'},
+    {'manufacturer_value': '461425', 'manufacturer_name': 'Sport Kings', 'manufacturer_filter_data': '840'},
+    {'manufacturer_value': '223462', 'manufacturer_name': 'Pacific Trading Cards', 'manufacturer_filter_data': '696'},
+    {'manufacturer_value': '451895', 'manufacturer_name': 'Star Pics', 'manufacturer_filter_data': '599'},
+    {'manufacturer_value': '451886', 'manufacturer_name': 'Pinnacle/Score', 'manufacturer_filter_data': '564'},
+    {'manufacturer_value': '451892', 'manufacturer_name': 'Signature Rookies', 'manufacturer_filter_data': '539'},
+    {'manufacturer_value': '521635', 'manufacturer_name': 'Onyx', 'manufacturer_filter_data': '404'},
+    {'manufacturer_value': '562441', 'manufacturer_name': 'South Bay Cards', 'manufacturer_filter_data': '400'}]
+
+brand_list = [{'brand_value': '525746', 'brand_name': 'Panini Prizm', 'brand_filter_data': '154963'},
+              {'brand_value': '223569', 'brand_name': 'Select', 'brand_filter_data': '116018'},
+              {'brand_value': '452562', 'brand_name': 'Hoops', 'brand_filter_data': '111274'},
+              {'brand_value': '606156', 'brand_name': 'Donruss Optic', 'brand_filter_data': '100429'},
+              {'brand_value': '223472', 'brand_name': 'Donruss', 'brand_filter_data': '71906'},
+              {'brand_value': '729745', 'brand_name': 'Panini Mosaic', 'brand_filter_data': '68672'},
+              {'brand_value': '529713', 'brand_name': 'Panini National Treasures', 'brand_filter_data': '59846'},
+              {'brand_value': '545265', 'brand_name': 'Panini Spectra', 'brand_filter_data': '51491'},
+              {'brand_value': '576981', 'brand_name': 'Panini Prizm Draft Picks', 'brand_filter_data': '49778'},
+              {'brand_value': '655911', 'brand_name': 'Panini Chronicles', 'brand_filter_data': '46182'},
+              {'brand_value': '538171', 'brand_name': 'Panini Flawless', 'brand_filter_data': '45084'},
+              {'brand_value': '539639', 'brand_name': 'Immaculate Collection', 'brand_filter_data': '42448'},
+              {'brand_value': '684112', 'brand_name': 'Panini Obsidian', 'brand_filter_data': '34054'},
+              {'brand_value': '519207', 'brand_name': 'Panini Contenders', 'brand_filter_data': '32818'},
+              {'brand_value': '223471', 'brand_name': 'Upper Deck', 'brand_filter_data': '31943'},
+              {'brand_value': '743069', 'brand_name': 'Hoops Premium Stock', 'brand_filter_data': '31862'},
+              {'brand_value': '518648', 'brand_name': 'Elite', 'brand_filter_data': '31764'},
+              {'brand_value': '504171', 'brand_name': 'Court Kings', 'brand_filter_data': '31255'},
+              {'brand_value': '223469', 'brand_name': 'Finest', 'brand_filter_data': '30272'},
+              {'brand_value': '186425', 'brand_name': 'Topps', 'brand_filter_data': '29566'},
+              {'brand_value': '223468', 'brand_name': 'Topps Chrome', 'brand_filter_data': '28701'},
+              {'brand_value': '596682', 'brand_name': 'Panini Revolution', 'brand_filter_data': '26934'},
+              {'brand_value': '509756', 'brand_name': 'Totally Certified', 'brand_filter_data': '25928'},
+              {'brand_value': '582092', 'brand_name': 'Panini Noir', 'brand_filter_data': '25527'}]
+
+team_list = [{'team_value': '344668', 'team_name': 'Los Angeles Lakers', 'team_filter_data': '106019'},
+             {'team_value': '344614', 'team_name': 'Boston Celtics', 'team_filter_data': '91403'},
+             {'team_value': '344618', 'team_name': 'Chicago Bulls', 'team_filter_data': '82392'},
+             {'team_value': '344629', 'team_name': 'Golden State Warriors', 'team_filter_data': '79154'},
+             {'team_value': '344630', 'team_name': 'Houston Rockets', 'team_filter_data': '78461'},
+             {'team_value': '344652', 'team_name': 'San Antonio Spurs', 'team_filter_data': '77051'},
+             {'team_value': '344626', 'team_name': 'Detroit Pistons', 'team_filter_data': '76871'},
+             {'team_value': '344643', 'team_name': 'New York Knicks', 'team_filter_data': '74633'},
+             {'team_value': '344623', 'team_name': 'Dallas Mavericks', 'team_filter_data': '73514'},
+             {'team_value': '344646', 'team_name': 'Philadelphia 76ers', 'team_filter_data': '71613'},
+             {'team_value': '344612', 'team_name': 'Atlanta Hawks', 'team_filter_data': '70306'},
+             {'team_value': '344664', 'team_name': 'Utah Jazz', 'team_filter_data': '69966'},
+             {'team_value': '344650', 'team_name': 'Portland Trail Blazers', 'team_filter_data': '68354'},
+             {'team_value': '344624', 'team_name': 'Denver Nuggets', 'team_filter_data': '68246'},
+             {'team_value': '344640', 'team_name': 'Minnesota Timberwolves', 'team_filter_data': '68100'},
+             {'team_value': '344648', 'team_name': 'Phoenix Suns', 'team_filter_data': '67192'},
+             {'team_value': '344645', 'team_name': 'Orlando Magic', 'team_filter_data': '65474'},
+             {'team_value': '344637', 'team_name': 'Miami Heat', 'team_filter_data': '65443'},
+             {'team_value': '344621', 'team_name': 'Cleveland Cavaliers', 'team_filter_data': '65316'},
+             {'team_value': '344672', 'team_name': 'Los Angeles Clippers', 'team_filter_data': '64305'},
+             {'team_value': '344638', 'team_name': 'Milwaukee Bucks', 'team_filter_data': '64021'},
+             {'team_value': '344651', 'team_name': 'Sacramento Kings', 'team_filter_data': '63722'},
+             {'team_value': '344670', 'team_name': 'Memphis Grizzlies', 'team_filter_data': '62106'},
+             {'team_value': '344631', 'team_name': 'Indiana Pacers', 'team_filter_data': '60889'},
+             {'team_value': '475125', 'team_name': 'Oklahoma City Thunder', 'team_filter_data': '58205'},
+             {'team_value': '344671', 'team_name': 'Toronto Raptors', 'team_filter_data': '55571'},
+             {'team_value': '344667', 'team_name': 'Washington Wizards', 'team_filter_data': '54514'},
+             {'team_value': '344669', 'team_name': 'Charlotte Hornets', 'team_filter_data': '50863'},
+             {'team_value': '546251', 'team_name': 'New Orleans Pelicans', 'team_filter_data': '45831'},
+             {'team_value': '525620', 'team_name': 'Brooklyn Nets', 'team_filter_data': '45038'},
+             {'team_value': '344641', 'team_name': 'New Jersey Nets', 'team_filter_data': '20393'},
+             {'team_value': '344658', 'team_name': 'Seattle Supersonics', 'team_filter_data': '18493'},
+             {'team_value': '344673', 'team_name': 'New Orleans Hornets', 'team_filter_data': '12541'},
+             {'team_value': '370514', 'team_name': 'Charlotte Bobcats', 'team_filter_data': '10865'},
+             {'team_value': '344666', 'team_name': 'Washington Bullets', 'team_filter_data': '3908'},
+             {'team_value': '447343', 'team_name': 'Los Angeles Sparks', 'team_filter_data': '3620'},
+             {'team_value': '447349', 'team_name': 'Seattle Storm', 'team_filter_data': '3378'},
+             {'team_value': '447347', 'team_name': 'Phoenix Mercury', 'team_filter_data': '3193'},
+             {'team_value': '344674', 'team_name': 'Vancouver Grizzlies', 'team_filter_data': '3107'},
+             {'team_value': '447344', 'team_name': 'Minnesota Lynx', 'team_filter_data': '3014'},
+             {'team_value': '447346', 'team_name': 'New York Liberty', 'team_filter_data': '2942'},
+             {'team_value': '649068', 'team_name': 'Dallas Wings', 'team_filter_data': '2850'},
+             {'team_value': '447338', 'team_name': 'Chicago Sky', 'team_filter_data': '2682'},
+             {'team_value': '447350', 'team_name': 'Washington Mystics', 'team_filter_data': '2488'},
+             {'team_value': '447339', 'team_name': 'Connecticut Sun', 'team_filter_data': '2161'},
+             {'team_value': '450900', 'team_name': 'Team USA BK', 'team_filter_data': '1967'},
+             {'team_value': '344660', 'team_name': 'St. Louis Hawks', 'team_filter_data': '835'},
+             {'team_value': '447341', 'team_name': 'Houston Comets', 'team_filter_data': '668'},
+             {'team_value': '344642', 'team_name': 'New Orleans Jazz', 'team_filter_data': '570'},
+             {'team_value': '344639', 'team_name': 'Minneapolis Lakers', 'team_filter_data': '477'},
+             {'team_value': '344678', 'team_name': 'Cincinnati Royals', 'team_filter_data': '457'},
+             {'team_value': '344634', 'team_name': 'Kentucky Colonels', 'team_filter_data': '344'},
+             {'team_value': '446568', 'team_name': 'Harlem Globetrotters', 'team_filter_data': '321'},
+             {'team_value': '447348', 'team_name': 'Sacramento Monarchs', 'team_filter_data': '305'},
+             {'team_value': '344615', 'team_name': 'Buffalo Braves', 'team_filter_data': '299'},
+             {'team_value': '344633', 'team_name': 'Kansas City Kings', 'team_filter_data': '232'},
+             {'team_value': '344657', 'team_name': 'San Francisco Warriors', 'team_filter_data': '210'},
+             {'team_value': '447340', 'team_name': 'Detroit Shock', 'team_filter_data': '210'},
+             {'team_value': '344613', 'team_name': 'Baltimore Bullets', 'team_filter_data': '164'},
+             {'team_value': '447337', 'team_name': 'Charlotte Sting', 'team_filter_data': '143'},
+             {'team_value': '344653', 'team_name': 'San Diego Clippers', 'team_filter_data': '129'},
+             {'team_value': '344662', 'team_name': 'Syracuse Nationals', 'team_filter_data': '107'},
+             {'team_value': '447342', 'team_name': 'Indiana Fever', 'team_filter_data': '97'},
+             {'team_value': '344644', 'team_name': 'New York Nets', 'team_filter_data': '73'},
+             {'team_value': '344647', 'team_name': 'Philadelphia Warriors', 'team_filter_data': '66'},
+             {'team_value': '659922', 'team_name': 'Mexico', 'team_filter_data': '66'},
+             {'team_value': '580928', 'team_name': 'Atlanta Dream', 'team_filter_data': '59'},
+             {'team_value': '447351', 'team_name': 'San Antonio Silver Stars', 'team_filter_data': '57'},
+             {'team_value': '344680', 'team_name': 'Virginia Squires', 'team_filter_data': '55'},
+             {'team_value': '344681', 'team_name': 'Albuquerque Thunderbirds', 'team_filter_data': '54'},
+             {'team_value': '344685', 'team_name': 'Florida Flame', 'team_filter_data': '54'},
+             {'team_value': '344677', 'team_name': 'Rochester Royals', 'team_filter_data': '52'},
+             {'team_value': '344655', 'team_name': 'San Diego Rockets', 'team_filter_data': '50'},
+             {'team_value': '344687', 'team_name': 'Roanoke Dazzle', 'team_filter_data': '48'},
+             {'team_value': '370847', 'team_name': 'New York Yankees', 'team_filter_data': '48'},
+             {'team_value': '344665', 'team_name': 'Utah Stars', 'team_filter_data': '44'},
+             {'team_value': '344684', 'team_name': 'Fayetteville Patriots', 'team_filter_data': '42'},
+             {'team_value': '580947', 'team_name': 'San Antonio Stars', 'team_filter_data': '37'},
+             {'team_value': '344688', 'team_name': 'Tulsa 66ers', 'team_filter_data': '36'},
+             {'team_value': '344683', 'team_name': 'Austin Toros', 'team_filter_data': '36'},
+             {'team_value': '344682', 'team_name': 'Arkansas Rimrockers', 'team_filter_data': '36'},
+             {'team_value': '498532', 'team_name': 'Tulsa Shock', 'team_filter_data': '33'},
+             {'team_value': '675788', 'team_name': 'Las Vegas Aces', 'team_filter_data': '30'},
+             {'team_value': '344654', 'team_name': 'San Diego Conquistadors', 'team_filter_data': '24'},
+             {'team_value': '344625', 'team_name': 'Denver Rockets', 'team_filter_data': '23'},
+             {'team_value': '628387', 'team_name': 'Arles', 'team_filter_data': '22'},
+             {'team_value': '344622', 'team_name': 'Dallas Chaparrals', 'team_filter_data': '21'},
+             {'team_value': '370853', 'team_name': 'Seattle Mariners', 'team_filter_data': '21'},
+             {'team_value': '344649', 'team_name': 'Pittsburgh Condors', 'team_filter_data': '20'},
+             {'team_value': '344636', 'team_name': 'Memphis Sounds', 'team_filter_data': '20'},
+             {'team_value': '344617', 'team_name': 'Carolina Cougars', 'team_filter_data': '19'},
+             {'team_value': '344661', 'team_name': 'St. Louis Spirits', 'team_filter_data': '19'},
+             {'team_value': '370842', 'team_name': 'Los Angeles Dodgers', 'team_filter_data': '19'},
+             {'team_value': '344627', 'team_name': 'Florida Floridians', 'team_filter_data': '18'},
+             {'team_value': '344686', 'team_name': 'Fort Worth Flyers', 'team_filter_data': '18'},
+             {'team_value': '344679', 'team_name': 'Memphis Tams', 'team_filter_data': '14'},
+             {'team_value': '263067', 'team_name': 'Pittsburgh Penguins', 'team_filter_data': '14'},
+             {'team_value': '344616', 'team_name': 'Capital Bullets', 'team_filter_data': '13'},
+             {'team_value': '344656', 'team_name': 'San Diego Sails', 'team_filter_data': '11'},
+             {'team_value': '370850', 'team_name': 'Pittsburgh Pirates', 'team_filter_data': '11'}]