fastapi学习笔记
fastapi学习笔记
status
Published
date
Mar 16, 2021
slug
fastapi
summary
fastapi学习笔记
category
学习思考
tags
Fast API
简单运行
from fastapi import FastAPI

# Minimal FastAPI application: one GET route on the root path.
app = FastAPI()


@app.get('/')
def index():
    """Return a static greeting as a JSON object."""
    return {'hello': 'world'}
一个简单的项目

main.py

from fastapi import FastAPI
import uvicorn

# One import per line (PEP 8); `import msg,ip` squeezed both routers onto one line.
import msg
import ip

app = FastAPI()
# Mount the two sub-routers under their URL prefixes.
app.include_router(msg.router, prefix="/msg")
app.include_router(ip.router, prefix="/ip")


@app.get('/')
async def index():
    """Site root — plain-text landing response."""
    return '首页'


if __name__ == '__main__':
    # NOTE(review): `debug` is not an accepted uvicorn.run() parameter in
    # current uvicorn releases — confirm the installed version or drop it.
    uvicorn.run(app='main:app', host='0.0.0.0', port=12345, reload=True, debug=True)

ip.py

from fastapi import APIRouter
import requests
from bs4 import BeautifulSoup
import json
from random import choice
router = APIRouter()


def api():
    """Download the free proxy list and parse it into a list of dicts.

    Each returned entry has keys: "ip" ("host:port"), "country", "type",
    "response_time", and a fixed "weixin" tag. Malformed or blank lines
    are skipped.
    """
    url = 'http://proxylist.fatezero.org/proxy.list'
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
    res = requests.get(url, headers=headers)
    res.encoding = "utf-8"
    data = []
    for line in res.text.split('\n'):
        try:
            # Parse each line once; the original called json.loads(i)
            # five separate times per line.
            item = json.loads(line)
            ip = str(item['host']) + ':' + str(item['port'])
            data.append({
                "ip": ip,
                "country": str(item['country']),
                "type": str(item['type']),
                "response_time": str(item['response_time']),
                'weixin': 'zhaoipxyz',
            })
        except (ValueError, KeyError, TypeError):
            # Blank/malformed line — skip it (was a bare except).
            pass
    return data

def choiceone():
    """Fetch the proxy list and return one randomly chosen entry."""
    return choice(api())

@router.get('/')
async def msg():
    """GET / (mounted under /ip) — respond with one random proxy record."""
    proxy_entry = choiceone()
    return proxy_entry

msg.py

import re
import time

import requests
from bs4 import BeautifulSoup
from peewee import *

# NOTE(review): PostgresqlDatabase() expects a database *name* plus separate
# user/password/host/port kwargs; passing a full "user:pass@host:port/db"
# string here most likely fails to connect — confirm, or parse the URL with
# playhouse.db_url.connect(). Also: live credentials are hard-coded in a
# public article — rotate them and load from the environment instead.
db = PostgresqlDatabase('kfnmhpuuszajss:4bb62ccc1b91797c653df18b169ad38f541489c9ed56d0770ae67823b5663227@ec2-54-156-149-189.compute-1.amazonaws.com:5432/d14bupst16squ7')

class BaseModel(Model):
    """Peewee base model binding every subclass to the shared `db` connection."""
    class Meta:
        database = db

class Share(BaseModel):
    """One collected deal/bargain post (title + link + source site + time)."""
    title = CharField(verbose_name='标题', null=False, index=True)
    link = CharField(verbose_name='链接', null=False)
    site = CharField(verbose_name='来源站点', null=False)
    # The default must be a callable: the original passed time.strftime(...)
    # directly, which evaluated ONCE at import time and stamped every row
    # with the process start time.
    pubtime = DateTimeField(
        verbose_name='发布时间',
        null=False,
        default=lambda: time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
    )

    class Meta:
        table_name = 'share'
try:
    Share.create_table()
except Exception:
    # Table already exists (or DB unreachable). Narrowed from a bare
    # `except:`, which would also swallow KeyboardInterrupt/SystemExit.
    print('表已存在')

class Xianbao:
    """Scrapers for several Chinese deal-sharing ("线报") forums.

    Each public method fetches one forum's listing page and stores every
    post whose link is not already in the `share` table as a Share row.
    """

    def __init__(self):
        # Pretend to be a desktop Chrome browser. The original header VALUE
        # started with a duplicated "User-Agent: " prefix; dropped here so
        # servers see a well-formed UA string.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'
            }

    def _store(self, title, url, site):
        """Insert one post unless a Share row with this link already exists.

        Fixes three bugs from the original copy-pasted bodies:
        `Share.get(link==url).frist()` misspelled first(), compared two
        local strings instead of building an expression on the Share.link
        column, and `Share.create(..., link=link)` stored the bs4 tag
        object rather than the URL (so the dedupe check never matched).
        """
        if Share.select().where(Share.link == url).first():
            print(title, '数据已存在!')
        else:
            Share.create(title=title, site=site, link=url)
            print(title, '采集成功!')

    def wgzj(self):
        """Scrape 网购之家 forum board 2, page 1 (relative hrefs)."""
        site = '网购之家'
        r = requests.get('http://www.wgzj.cn/bbs/forum-2-1.html', headers=self.headers)
        bs = BeautifulSoup(r.text, 'html.parser')
        for link in bs.findAll('a', class_='s xst'):
            self._store(link.text, 'http://www.wgzj.cn/bbs/' + link['href'], site)

    def kxd(self):
        """Scrape 科学刀 forum board 42, page 1 (hrefs are already absolute)."""
        site = '科学刀'
        r = requests.get('https://kxdao.net/forum-42-1.html', headers=self.headers)
        bs = BeautifulSoup(r.text, 'html.parser')
        for link in bs.findAll('a', class_='s xst'):
            self._store(link.text, link['href'], site)

    def leyu(self):
        """Scrape the 乐愚社区 线报 board (relative hrefs)."""
        site = '乐愚社区'
        r = requests.get('https://bbs.leyuz.net/f/xianbao', headers=self.headers)
        bs = BeautifulSoup(r.text, 'html.parser')
        for link in bs.findAll('a', class_="tag-title"):
            self._store(link.text, 'https://bbs.leyuz.net' + link['href'], site)

    def xb0818(self):
        """Scrape 线报0818; posts matched by the /xbhd/<id>.html href pattern.

        Requires `import re` at module top — the original snippet used `re`
        without importing it, which raised NameError at runtime.
        """
        site = '线报0818'
        r = requests.get('http://www.0818tuan.com/list-1-0.html', headers=self.headers)
        bs = BeautifulSoup(r.text, 'html.parser')
        for link in bs.findAll('a', href=re.compile(r"/xbhd/\d+\.html")):
            self._store(link.text, "http://www.0818tuan.com" + link['href'], site)

requirements.txt

uvicorn
fastapi
aiofiles
bs4
requests
python-multipart
jinja2
lxml
pymongo
IP检测
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.responses import RedirectResponse
import time,requests,random
from bs4 import BeautifulSoup
from starlette.middleware.cors import CORSMiddleware
from random import choice


app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# CORS middleware. The original origin list was missing a comma between
# "https://*.ipzdc.com" and "https://zhaoip.xyz", so Python's implicit string
# concatenation fused them into one bogus origin and BOTH sites were blocked.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "https://free998ip.herokuapp.com",
        "https://998ip.com",
        "https://*.ipzdc.com",
        "https://zhaoip.xyz",
        "https://zdl.im",
        "http://127.0.0.1",
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# In-memory pool of proxies that passed the health check in porxy().
iplist = []

def spider(cat):
    """Scrape the first <tbody> of www.xiladaili.com/<cat>.

    Returns the bs4 element on success, or the string 'error' on any
    request/parse failure (callers compare against 'error').
    """
    url = 'http://www.xiladaili.com/' + cat
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
    try:
        res = requests.get(url, headers=headers)
        res.encoding = "utf-8"
        soup = BeautifulSoup(res.text, 'html.parser')
        # html[0] raises IndexError when the page has no <tbody>; narrowed
        # from a bare except so real bugs are no longer swallowed.
        return soup.select('tbody')[0]
    except (requests.RequestException, IndexError):
        return 'error'

def porxy(ip):
    """Health-check one proxy against baidu.com and maintain `iplist`.

    Returns 'ip有效' when the proxied request succeeds, 'ip无效' otherwise.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
    # The pool stores dicts ({'ip': ...}); the original compared/removed the
    # bare string `ip`, so a dead proxy was never actually evicted.
    entry = {'ip': ip}
    try:
        res = requests.get('http://www.baidu.com', proxies={"http": "http://%s" % ip}, timeout=2, headers=headers)
        if res.status_code == 200:
            if entry not in iplist:  # avoid duplicate pool entries on re-check
                iplist.append(entry)
            return 'ip有效'
        if entry in iplist:
            iplist.remove(entry)
        return 'ip无效'
    except requests.RequestException:
        return 'ip无效'

# Static <thead> HTML prepended to the scraped <tbody> by the "/{ip}" route.
th = '''<thead style="background-color: #DDF0FF">
            <tr>
                <th class="border-bottom-0">代理IP地址</th>
                <th class="border-bottom-0">匿名度</th>
                <th class="border-bottom-0">IP类型</th>
                <th class="border-bottom-0">IP位置</th>
                <th class="border-bottom-0">响应速度</th>
                <th class="border-bottom-0">存活时间</th>
                <th class="border-bottom-0">最后验证时间</th>
                <th class="border-bottom-0">评分</th>
            </tr>
        </thead>
    '''
@app.get('/')
async def index(request: Request):
    """Serve the landing page template."""
    context = {"request": request}
    return templates.TemplateResponse('index.html', context)

@app.get('/ip')
async def api():
    """Return one random proxy from the pool, or an error payload if empty."""
    try:
        return choice(iplist)
    except IndexError:
        # choice() raises IndexError on an empty pool; the original bare
        # except also hid any unrelated bug.
        return {"error": "IP池貌似没有可用IP,如需购买IP可联系 998IP.com"}
    

@app.get("/{ip}")
async def regist(ip):
    """Render the proxy table for category *ip* scraped from xiladaili.com."""
    # Scrape once; the original called spider(ip) twice, issuing two
    # upstream HTTP requests per page view.
    body = str(spider(ip))
    if body == 'error':
        return '服务器繁忙,暂时无法获取IP,请稍后再试!<br> 需要购买收费服务可联系:<a href="http://www.xhuosoft.cn">薪火代理</a>'
    return th + body

@app.get("/chip/{ip}")
async def chip(ip):
    """Health-check a single proxy and return the verdict plus a disclaimer."""
    notice = ('<br><b>注:</b>检测IP使用python的requests.get请求百度网站,不一定准确。'
              '<br> 需要购买收费服务可联系:<a href="http://www.xhuosoft.cn">薪火代理</a>')
    return porxy(ip) + notice
 
  • Author:Sky
  • URL:https://zdl.im/article/fastapi
  • Copyright:All articles in this blog, except for special statements, adopt BY-NC-SA agreement. Please indicate the source!
  • Fast API
  • Workers反向代理Python学习笔记