diff --git a/oneget.py b/oneget.py
index 90ab7bf..355c825 100644
--- a/oneget.py
+++ b/oneget.py
@@ -7,7 +7,7 @@ import random
 import time
 import copy
 from threading import Lock
-import logging
+from logger import logger
 from DB import DBVidcon, DBSA
 import json
 from requests.adapters import HTTPAdapter
@@ -15,7 +15,6 @@ from urllib3.util.retry import Retry
 from dateutil import parser as date_parser
 
 MACHINE_ID = 3
-logger = logging.getLogger(__name__)
 db = DBVidcon()
 proxiesdict = db.get_proxy_agent_dict()
 
@@ -233,7 +232,7 @@ def main():
     if not kwdata:
         logger.error("没有获取到关键词数据")
         exit(1)
-
+    logger.info(f"搜索关键词数据: {kwdata}")
     kwdata = kwdata[0][1]
     rn = kwdata['rn']
     proxy_name = proxiesdict.get(rn)
@@ -623,6 +622,7 @@ def main():
             data = response.json()
             edges = data['data']['search']['stories']['edges']
             edges_len = len(edges)
+            logger.info(f"第 {i} 页,关键词: {kw},获取到 {edges_len} 条数据")
             tancks = []
             for j, edge in enumerate(edges):
                 node = edge.get("node", {})
@@ -637,7 +637,6 @@ def main():
                     "level": 0,
                 }
                 tancks.append(s_data)
-            # 我想在这加入20 个线程池
             with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
                 executor.map(dmvideo_info.get_video_info, tancks)
             if edges_len < 20:
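
The main change above drops the module-level `logging.getLogger(__name__)` in favor of a shared `logger` object imported from a project-local `logger` module, so oneget.py logs through one pre-configured instance. That module is not part of this diff; the following is only a minimal sketch of what such a `logger.py` might contain, assuming a plain stdlib `logging` setup (the logger name, handler, and format string are illustrative assumptions, not the actual implementation).

```python
# logger.py — hypothetical sketch; the real module behind
# `from logger import logger` is not shown in this diff.
import logging
import sys

# One shared logger so every script (oneget.py, etc.) emits through the
# same handler and format instead of per-module getLogger(__name__) calls.
logger = logging.getLogger("crawler")
logger.setLevel(logging.INFO)

_handler = logging.StreamHandler(sys.stdout)
_handler.setFormatter(
    logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")
)
logger.addHandler(_handler)
```

Centralizing the handler setup in one module keeps the `logger.info(...)` calls added in `main()` formatted consistently without each script repeating its own `logging.basicConfig` configuration.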