This commit is contained in:
Administrator
2025-12-08 16:07:14 +08:00
parent 98e094209f
commit 0224785c8d
3 changed files with 235 additions and 107 deletions

View File

@@ -15,11 +15,12 @@ def createBrowser(
host=None,
port=None,
proxyUserName=None,
proxyPassword=None
proxyPassword=None,
name='google'
): # 创建或者更新窗口,指纹参数 browserFingerPrint 如没有特定需求,只需要指定下内核即可,如果需要更详细的参数,请参考文档
json_data = {
"groupId": groupId, # 分组id
'name': 'google', # 窗口名称
'name': name, # 窗口名称
'remark': '', # 备注
'proxyMethod': 1, # 代理方式 2自定义 3 提取IP
# 代理类型 ['noproxy', 'http', 'https', 'socks5', 'ssh']
@@ -29,12 +30,15 @@ def createBrowser(
'proxyUserName': proxyUserName, # 代理账号
'proxyPassword': proxyPassword, # 代理账号
"browserFingerPrint": { # 指纹对象
'coreVersion': '140' # 内核版本注意win7/win8/winserver 2012 已经不支持112及以上内核了无法打开
'coreVersion': '138' # 内核版本注意win7/win8/winserver 2012 已经不支持112及以上内核了无法打开
}
}
res = requests.post(f"{url}/browser/update",
data=json.dumps(json_data), headers=headers).json()
print(res)
browserId = res['data']['id']
return browserId
@@ -114,10 +118,10 @@ def get_group_lists_Browser():
return res.json()["data"]["list"]
def get_browser_lists_Browser(id):
def get_browser_lists_Browser(id, page=0):
json_data = {
"groupId": id,
"page": 0,
"page": page,
"pageSize": 100
}
@@ -130,7 +134,7 @@ def get_group_lists(): # 获取全部分组的信息
json_data = {
"page": 0,
"pageSize": 10,
"pageSize": 100,
"all": True
}
@@ -154,16 +158,60 @@ def group_add(groupName):
return res.json()
if __name__ == '__main__':
for i in Xstart.select().where(
Xstart.x_id.is_null()
):
ips_info = Ips.select().where(Ips.start == 1,Ips.country == "法国").order_by(fn.Rand()).get()
update_proxy_Browser(
id=i.bit_id,
host=ips_info.host,
port=ips_info.port,
proxyUserName=ips_info.username,
proxyPassword=ips_info.password
)
def browser_detail(id):
    """Return the API response describing a single browser profile.

    Args:
        id: the browser-profile id assigned by the fingerprint-browser API.

    Returns:
        The parsed JSON body of the ``/browser/detail`` endpoint.
    """
    payload = {"id": id}
    response = requests.post(
        f'{url}/browser/detail',
        data=json.dumps(payload),
        headers=headers,
    )
    return response.json()
if __name__ == '__main__':
    # Clean-up pass: walk every browser profile registered under the
    # '推特' group and delete the ones that are orphaned (no Xstart row)
    # or whose account is not flagged as started.
    fz_datas = get_group_lists()
    for page in range(10):
        # NOTE(review): the original reused `i` for both the page index and
        # the browser row, which shadows the outer loop variable; renamed
        # for clarity (behavior was coincidentally correct because `page=i`
        # is evaluated before the inner loop rebinds `i`).
        for browser in get_browser_lists_Browser(id=fz_datas['推特'], page=page):
            x_start_info = Xstart.get_or_none(
                Xstart.bit_id == browser["id"]
            )
            if not x_start_info:
                # No local record for this browser profile -> orphan, drop it.
                deleteBrowser(id=browser["id"])
                continue
            if x_start_info.start:
                # Account is marked as started; keep its browser profile.
                continue
            # Known account but not started: delete the remote profile and
            # detach it from the local record so it can be recreated later.
            deleteBrowser(id=browser["id"])
            x_start_info.bit_id = None
            x_start_info.save()

View File

@@ -1,6 +1,7 @@
import random
import threading
import time
from concurrent.futures import ThreadPoolExecutor
import pyotp
from DrissionPage import ChromiumOptions, ChromiumPage
@@ -279,18 +280,16 @@ class Hub_Web:
url_id = ""
while True:
try:
res = tab.listen.wait() # 等待并获取一个数据包
for i in res.response.body["data"]['user']['result']["timeline"]['timeline']["instructions"][1][
"entries"]:
if "tweet" in i["entryId"]:
new_string = i["entryId"].replace("tweet-", "")
url_id += new_string + ";"
try:
res = tab.listen.wait(timeout=25) # 等待并获取一个数据包
for i in res.response.body["data"]['user']['result']["timeline"]['timeline']["instructions"][1][
"entries"]:
if "tweet" in i["entryId"]:
new_string = i["entryId"].replace("tweet-", "")
url_id += new_string + ";"
break
except:
continue
except:
pass
self.xstart_info.url_id = url_id
self.xstart_info.save()
@@ -332,7 +331,11 @@ class Hub_Web:
time.sleep(random.randint(1, 10))
def account_nurturing(self):
self.page.get(url=f"https://x.com/search?q=websea&src=typed_query")
titles = ["Bianca", "币安", "OKX", "bitget", "bybit", "mexc"]
random_element = random.choice(titles)
self.page.get(url=f"https://x.com/search?q={random_element}")
names = []
@@ -430,7 +433,7 @@ class Hub_Web:
def action(self):
self.ips_info = Ips.select().order_by(fn.Rand()).get()
self.ips_info = Ips.select().where(Ips.country == "法国").order_by(fn.Rand()).get()
if not self.xstart_info.bit_id:
self.xstart_info.ip_id = self.ips_info.id
@@ -441,6 +444,7 @@ class Hub_Web:
fz_datas = get_group_lists()
bit_id = createBrowser(
name=self.x_info.user_name,
groupId=fz_datas['推特'],
host=self.ips_info.host,
port=int(self.ips_info.port),
@@ -451,13 +455,13 @@ class Hub_Web:
self.xstart_info.bit_id = bit_id
self.xstart_info.save()
self.get_page()
# self.get_page()
# if self.get_page():
# logger.info(f"推特名字:{self.x_info.user_name},浏览器打开成功")
# else:
# logger.error(f"推特名字:{self.x_info.user_name},浏览器打开失败")
# return
if self.get_page():
logger.info(f"推特名字:{self.x_info.user_name},浏览器打开成功")
else:
logger.error(f"推特名字:{self.x_info.user_name},浏览器打开失败")
return
time.sleep(5)
@@ -469,32 +473,41 @@ class Hub_Web:
self.get_name()
# if self.login_x_main():
# self.xstart_info.start = 1
# self.xstart_info.save()
#
# logger.success(f"推特名字:{self.x_info.user_name}登录x成功")
#
# # time.sleep(25)
# #
# # for i in self.x_tab.cookies():
# # if i["name"] == "auth_token":
# # self.xstart_info.cookie = i["value"]
# # self.xstart_info.save()
#
# else:
# logger.error(f"推特名字:{self.x_info.user_name}登录x失败")
# self.xstart_info.start = 0
# self.xstart_info.save()
if self.login_x_main():
self.xstart_info.start = 1
self.xstart_info.save()
logger.success(f"推特名字:{self.x_info.user_name}登录x成功")
# time.sleep(25)
#
# for i in self.x_tab.cookies():
# if i["name"] == "auth_token":
# self.xstart_info.cookie = i["value"]
# self.xstart_info.save()
else:
logger.error(f"推特名字:{self.x_info.user_name}登录x失败")
self.xstart_info.start = 0
self.xstart_info.save()
# # 发推
# self.x_tab.ele(
# 'x://*[@id="react-root"]/div/div/div[2]/main/div/div/div/div/div/div[3]/div/div[2]/div[1]/div/div/div/div[2]/div[1]/div/div/div/div/div/div/div/div/div/div/div/div[1]/div/div/div/div/div/div[2]/div/div/div/div').input(
# "Websea顶级渠道注册就可享受websea合约85%返佣,量大可谈,欢迎代理咨询。 TG飞机http://t.me/webseadds")
# time.sleep(5)
# self.x_tab.ele(
# 'x://*[@id="react-root"]/div/div/div[2]/main/div/div/div/div/div/div[3]/div/div[2]/div[1]/div/div/div/div[2]/div[2]/div[2]/div/div/div/button').click()
# time.sleep(5)
# try:
# # self.x_tab.get("https://x.com/home")
# time.sleep(random.randint(3, 15))
#
# text = "Websea顶级渠道注册就可享受websea合约85%返佣,量大可谈,欢迎代理咨询。 TG飞机http://t.me/webseadds"
#
# self.x_tab.actions.click(
# 'x://*[@id="react-root"]/div/div/div[2]/main/div/div/div/div/div/div[3]/div/div[2]/div[1]/div/div/div/div[2]/div[1]/div/div/div/div/div/div/div/div/div/div/div/div[1]/div/div/div/div/div/div[2]/div/div/div/div').input(
# text)
#
# time.sleep(random.randint(3, 15))
# self.x_tab.actions.click(
# on_ele='x://*[@id="react-root"]/div/div/div[2]/main/div/div/div/div/div/div[3]/div/div[2]/div[1]/div/div/div/div[2]/div[2]/div[2]/div/div/div/button')
# time.sleep(5)
# except:
# pass
# # 获取推文链接
# self.get_urls()
@@ -505,60 +518,70 @@ class Hub_Web:
# except:
# pass
# # 养号流程
# try:
# self.account_nurturing()
# except Exception as e:
# print(e)
# 养号流程
try:
self.account_nurturing()
except Exception as e:
print(e)
tab = self.page.new_tab(url="https://x.com/CryptoStart_App")
time.sleep(10)
ele = tab.ele('x://span[text()="Follow"]', timeout=0.5)
if ele:
tab.actions.click(on_ele=ele)
time.sleep(10)
for i in range(random.randint(1, 10)):
tab.actions.scroll(delta_y=random.randint(400, 800))
time.sleep(random.randint(3, 10))
# 点击关注
# tab = self.page.new_tab(url="https://x.com/CryptoStart_App")
# time.sleep(10)
# ele = tab.ele('x://span[text()="Follow"]', timeout=0.5)
# if ele:
# tab.actions.click(on_ele=ele)
# time.sleep(10)
#
# for i in range(random.randint(1, 10)):
# tab.actions.scroll(delta_y=random.randint(400, 800))
# time.sleep(random.randint(3, 10))
self.page.quit()
if __name__ == '__main__':
fz_datas = get_group_lists()
# fz_datas = get_group_lists()
#
# for bit_data in get_browser_lists_Browser(id=fz_datas['推特']):
# print(bit_data)
# xstart_info, start = Xstart.get_or_create(
# bit_id=bit_data["id"],
# )
# if xstart_info.x_id:
# # continue
#
# hun_web = Hub_Web(xstart_info=xstart_info)
#
# # hun_web.action()
#
# threading.Thread(target=hun_web.action).start()
# time.sleep(random.randint(15, 60))
for bit_data in get_browser_lists_Browser(id=fz_datas['推特']):
print(bit_data)
xstart_info, start = Xstart.get_or_create(
bit_id=bit_data["id"],
)
if xstart_info.x_id:
# continue
# 同时运行
max_threads = 1
delay_between_start = 8 # 每次启动线程之间的延迟时间(秒)
hun_web = Hub_Web(xstart_info=xstart_info)
with ThreadPoolExecutor(max_workers=max_threads) as executor:
# hun_web.action()
# for x_token_info in XToken.select():
threading.Thread(target=hun_web.action).start()
time.sleep(random.randint(15, 60))
# 查询数据并转换为列表
random_x_infos = list(XToken.select())
# 直接对原列表进行打乱操作
random.shuffle(random_x_infos)
# # 同时运行
# max_threads = 1
# delay_between_start = 15 # 每次启动线程之间的延迟时间(秒)
#
# with ThreadPoolExecutor(max_workers=max_threads) as executor:
#
# for x_token_info in XToken.select():
# xstart_info, start = Xstart.get_or_create(
# x_id=x_token_info.id,
# )
#
# # if xstart_info.start:
# # continue
#
# hun_web = Hub_Web(x_info=x_token_info, xstart_info=xstart_info)
#
# executor.submit(hun_web.action)
#
# time.sleep(delay_between_start)
# 遍历打乱顺序后的列表
for x_token_info in random_x_infos:
xstart_info, start = Xstart.get_or_create(
x_id=x_token_info.id,
)
# if xstart_info.start:
# continue
hun_web = Hub_Web(x_info=x_token_info, xstart_info=xstart_info)
executor.submit(hun_web.action)
# time.sleep(random.randint(15, 60))
time.sleep(delay_between_start)

View File

@@ -1,7 +1,64 @@
import random
import time
from concurrent.futures import ThreadPoolExecutor
from models.ips import Ips
def main(_, sql_info):
    """Probe one SOCKS5 proxy record via ip.sb and persist its reachability.

    Args:
        _: ordinal index of the record (used only to prefix console output).
        sql_info: a DB row exposing ``host``/``port``/``username``/``password``;
            on a successful probe its ``country`` and ``start`` fields are
            updated and saved.

    The probe is retried up to 5 times with a sub-second backoff; a proxy
    that never answers is left unmarked (best-effort, no exception escapes).
    """
    import requests

    cookies = {
        'PHPSESSID': '8e48c4dd-69ef-561c-3082-e20564b88806',
        'ipsb': '3oMlNJzDmbhOQ7wtF96V451PWTxSvKfE',
    }

    headers = {
        'accept': '*/*',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'cache-control': 'no-cache',
        'dnt': '1',
        'pragma': 'no-cache',
        'referer': 'https://ip.sb/',
        'sec-ch-ua': '"Chromium";v="142", "Microsoft Edge";v="142", "Not_A Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'script',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36 Edg/142.0.0.0',
        # 'cookie': 'PHPSESSID=8e48c4dd-69ef-561c-3082-e20564b88806; ipsb=3oMlNJzDmbhOQ7wtF96V451PWTxSvKfE',
    }

    # Route both schemes through the proxy under test.
    proxies = {
        'http': f'socks5://{sql_info.username}:{sql_info.password}@{sql_info.host}:{sql_info.port}',
        'https': f'socks5://{sql_info.username}:{sql_info.password}@{sql_info.host}:{sql_info.port}',
    }

    for _attempt in range(5):
        try:
            # timeout added: the original could hang a worker thread forever
            # on a black-holed proxy.
            response = requests.get(
                'https://ipv4.ip.sb/addrinfo',
                cookies=cookies,
                headers=headers,
                proxies=proxies,
                timeout=15,
            )
            info = response.json()  # parse once instead of three times
            print(f"{_}{info}")
            sql_info.country = info["country"]
            sql_info.start = 1
            sql_info.save()
            return
        except Exception:
            # Was a bare `except:`; keep the best-effort retry semantics but
            # stop trapping SystemExit/KeyboardInterrupt.
            time.sleep(random.random())
if __name__ == '__main__':
    # Probe every stored proxy concurrently (at most 10 workers), staggering
    # submissions with a sub-second random delay between each.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for index, ip_record in enumerate(Ips.select()):
            executor.submit(main, index, ip_record)
            time.sleep(random.random())