python - Looping over links quickly in Python

import requests
import json
from tqdm import tqdm

The list of links to loop over:

links = ['https://www.google.com/', 'https://www.google.com/', 'https://www.google.com/']

A for loop that requests each link:

data = []
for link in tqdm(links):
    response = requests.get(link)
    response = response.json()
    data.append(response)

The for loop above goes through the whole list of links, but it becomes very time-consuming when I try to loop over around a thousand links. Any help?

Answer 1

The easiest way is to make it multithreaded. The best way is probably to make it asynchronous.

Multithreaded solution:

import requests
from tqdm.contrib.concurrent import thread_map

links = ['https://www.google.com/', 'https://www.google.com/', 'https://www.google.com/']

def get_data(url):
    response = requests.get(url)
    response = response.json()  # Do note this might fail at times
    return response

data = thread_map(get_data, links)
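
If you need to tune the level of concurrency, thread_map forwards a max_workers keyword to the underlying ThreadPoolExecutor; a minimal sketch reusing the get_data above (32 is just an arbitrary example value, not a recommendation):

data = thread_map(get_data, links, max_workers=32)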

Or, without using tqdm.contrib.concurrent.thread_map:

import requests
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm

links = ['https://www.google.com/', 'https://www.google.com/', 'https://www.google.com/']

def get_data(url):
    response = requests.get(url)
    response = response.json()  # Do note this might fail at times
    return response

with ThreadPoolExecutor() as executor:
    data = list(tqdm(executor.map(get_data, links), total=len(links)))
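
Since response.json() can fail on non-JSON or error responses (as the comment in get_data notes), you may want each request to fail independently instead of crashing the whole run. A minimal sketch of that idea; get_data_safe, the timeout value, and returning None on failure are all assumptions, not part of the original answer:

import requests
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm

links = ['https://www.google.com/', 'https://www.google.com/', 'https://www.google.com/']

def get_data_safe(url):
    try:
        # A timeout keeps one hung server from stalling a worker thread forever
        response = requests.get(url, timeout=10)
        return response.json()
    except (requests.RequestException, ValueError):
        # Network error or non-JSON body: return a placeholder (assumption)
        return None

with ThreadPoolExecutor() as executor:
    data = list(tqdm(executor.map(get_data_safe, links), total=len(links)))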

Answer 2

As suggested in the comments, you can use asyncio with aiohttp.

import asyncio
import aiohttp

urls = ["your", "links", "here"]

# create aio connector
conn = aiohttp.TCPConnector(limit_per_host=100, limit=0, ttl_dns_cache=300)

# set number of parallel requests - if you are requesting different domains you are likely to be able to set this higher, otherwise you may be rate limited
PARALLEL_REQUESTS = 10

# Create results array to collect results
results = []

async def gather_with_concurrency(n):
    # Create semaphore for async i/o  
    semaphore = asyncio.Semaphore(n)

    # create an aiohttp session using the previous connector
    session = aiohttp.ClientSession(connector=conn)

    # await logic for get request
    async def get(url):
        async with semaphore:
            async with session.get(url, ssl=False) as response:
                obj = await response.read()
                # once the object is acquired, append it to the results list
                results.append(obj)
    # wait for all requests to be gathered and then close session
    await asyncio.gather(*(get(url) for url in urls))
    await session.close()

# get async event loop
loop = asyncio.get_event_loop()
# run using number of parallel requests
loop.run_until_complete(gather_with_concurrency(PARALLEL_REQUESTS))
# Close connection
conn.close()

# loop through results and do something to them
for res in results:
    do_something(res)

I have tried to comment the code as much as possible.
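
As an aside, on Python 3.7+ the explicit event-loop management can be replaced with asyncio.run. A minimal sketch of the same idea, with the connector and session created inside the coroutine so they are attached to the running loop; the main name and the parallel_requests default are assumptions:

import asyncio
import aiohttp

async def main(urls, parallel_requests=10):
    semaphore = asyncio.Semaphore(parallel_requests)
    conn = aiohttp.TCPConnector(limit_per_host=100, limit=0, ttl_dns_cache=300)
    # async with closes the session (and its connector) automatically
    async with aiohttp.ClientSession(connector=conn) as session:
        async def get(url):
            async with semaphore:
                async with session.get(url, ssl=False) as response:
                    return await response.read()
        return await asyncio.gather(*(get(url) for url in urls))

results = asyncio.run(main(["your", "links", "here"]))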

I have used BS4 to parse the responses this way (inside the do_something logic), but it really depends on your use case.
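
For reference, a minimal sketch of what a BS4-based do_something could look like; extracting the page <title> is purely an illustrative assumption, not the answerer's actual parsing logic:

from bs4 import BeautifulSoup

def do_something(raw_html):
    # response.read() returns bytes; BeautifulSoup handles the decoding
    soup = BeautifulSoup(raw_html, "html.parser")
    return soup.title.string if soup.title else None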
