From 40a08a92ad35b030a6b056fc37201f51b7e116be Mon Sep 17 00:00:00 2001
From: "pixeebot[bot]" <104101892+pixeebot[bot]@users.noreply.github.com>
Date: Thu, 5 Dec 2024 08:53:22 +0000
Subject: [PATCH] Add timeout to `requests` calls
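
By default, the `requests` library never times out an HTTP call: if a
remote host accepts the connection but stops responding, the calling
thread can hang indefinitely. Passing an explicit `timeout` bounds the
wait and raises an exception instead. A minimal sketch of the behavior
this patch relies on (the URL here is illustrative):

    import requests

    try:
        # With timeout=60, the 60-second limit is applied separately to
        # the connect phase and to each read from the socket; a silent
        # server now raises instead of blocking forever.
        r = requests.get("https://example.com", timeout=60)
    except requests.exceptions.Timeout:
        r = None  # treat an unresponsive host like any other failure

Every `requests` call in the modules below now passes `timeout=60`.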
---
modules/copainsdavant_search.py | 112 +++----
modules/dirigeants_bfmtv.py | 58 ++--
modules/facebook_search.py | 44 +--
modules/instagram_search.py | 536 ++++++++++++++++----------------
modules/mail_check.py | 34 +-
modules/mail_gen.py | 120 +++----
modules/pagesblanches_search.py | 80 ++---
modules/scylla_sh.py | 62 ++--
modules/skype_search.py | 62 ++--
9 files changed, 554 insertions(+), 554 deletions(-)
diff --git a/modules/copainsdavant_search.py b/modules/copainsdavant_search.py
index 6b13688..00e24ed 100644
--- a/modules/copainsdavant_search.py
+++ b/modules/copainsdavant_search.py
@@ -1,56 +1,56 @@
-import requests, bs4, colorama, json
-from bs4 import BeautifulSoup
-
-def copains_davant(name,pren):
-    headers = {
-        'Accept':'application/json, text/javascript, */*; q=0.01',
-        'X-Requested-With':'XMLHttpRequest'
-    }
-    r = requests.get(url='http://copainsdavant.linternaute.com/s/?full=&q={} {}&ty=1&xhr='.format(pren,name),headers=headers)
-    try:
-        pagephone = r.content.decode().split(',"$data":')[1].split('{"copains":')[1]
-        dataa = pagephone[:-2]
-        data = json.loads(dataa)
-        users_list = data['users']
-        user_list = []
-        for i in users_list:
-            i = str(i).strip()
-            if i != "0":
-                user_list.append(i)
-        new_verified = []
-        for i in user_list:
-            if len(new_verified) == 0:
-                profile = data['users'][i]
-                full_name = (profile['lib'])
-                if name.lower() and pren.lower() in full_name.lower():
-                    url = (profile['url'])
-                    new_verified.append(url)
-        profil_url = new_verified[0]
-        r = requests.get('http://copainsdavant.linternaute.com{}'.format(profil_url))
-        pagephone = r.content
-        featuresphone = "html.parser"
-        soup = BeautifulSoup(pagephone,featuresphone)
-        localisation = str(soup.find('span',{'class':'locality'}).text)
-        naissance = str(soup.find('abbr',{'class':'bday'}).text.strip())
-        name_full = str(soup.find('a',{'class':'url'}).text.strip())
-        photo = str(soup.find('img',{'itemprop':'logo'})).split('itemprop="logo" src="')[1].split('"')[0]
-        if "/anonymousL.jpg" in photo:
-            photo = "None"
-        card = soup.find('section',{'id':'vcard'}).text.strip()
-        job = "None"
-        nb_kids = "None"
-        situation_familiale = "None"
-        if "Situation familiale" in card:
-            situation_familiale = card.split('Situation familiale :')[1].split(' ')[0].strip()
-            situation_familiale = situation_familiale.strip()
-        if "Profession" in card:
-            job = card.split('Profession :')[1].split(' ')[0]
-            job = " ".join(job.split()).split(' ')[0]
-        if "Enfant" in card:
-            nb_kids = card.split("Enfants :")[1].split(" ")[0]
-        text = {'url_full':'http://copainsdavant.linternaute.com{}'.format(profil_url),'familial_situation':str(situation_familiale).replace('Enfants','').replace('Aucune','').strip(),'full_name':str(name_full),'born':str(naissance),'localisation':str(localisation),
-        "nb_enfants":str(nb_kids).strip(),"Job":str(job).strip(),'pdp':str(photo)
-        }
-        return text
-    except IndexError:
-        return None
+import requests, bs4, colorama, json
+from bs4 import BeautifulSoup
+
+def copains_davant(name,pren):
+    headers = {
+        'Accept':'application/json, text/javascript, */*; q=0.01',
+        'X-Requested-With':'XMLHttpRequest'
+    }
+    r = requests.get(url='http://copainsdavant.linternaute.com/s/?full=&q={} {}&ty=1&xhr='.format(pren,name),headers=headers, timeout=60)
+    try:
+        pagephone = r.content.decode().split(',"$data":')[1].split('{"copains":')[1]
+        dataa = pagephone[:-2]
+        data = json.loads(dataa)
+        users_list = data['users']
+        user_list = []
+        for i in users_list:
+            i = str(i).strip()
+            if i != "0":
+                user_list.append(i)
+        new_verified = []
+        for i in user_list:
+            if len(new_verified) == 0:
+                profile = data['users'][i]
+                full_name = (profile['lib'])
+                if name.lower() and pren.lower() in full_name.lower():
+                    url = (profile['url'])
+                    new_verified.append(url)
+        profil_url = new_verified[0]
+        r = requests.get('http://copainsdavant.linternaute.com{}'.format(profil_url), timeout=60)
+        pagephone = r.content
+        featuresphone = "html.parser"
+        soup = BeautifulSoup(pagephone,featuresphone)
+        localisation = str(soup.find('span',{'class':'locality'}).text)
+        naissance = str(soup.find('abbr',{'class':'bday'}).text.strip())
+        name_full = str(soup.find('a',{'class':'url'}).text.strip())
+        photo = str(soup.find('img',{'itemprop':'logo'})).split('itemprop="logo" src="')[1].split('"')[0]
+        if "/anonymousL.jpg" in photo:
+            photo = "None"
+        card = soup.find('section',{'id':'vcard'}).text.strip()
+        job = "None"
+        nb_kids = "None"
+        situation_familiale = "None"
+        if "Situation familiale" in card:
+            situation_familiale = card.split('Situation familiale :')[1].split(' ')[0].strip()
+            situation_familiale = situation_familiale.strip()
+        if "Profession" in card:
+            job = card.split('Profession :')[1].split(' ')[0]
+            job = " ".join(job.split()).split(' ')[0]
+        if "Enfant" in card:
+            nb_kids = card.split("Enfants :")[1].split(" ")[0]
+        text = {'url_full':'http://copainsdavant.linternaute.com{}'.format(profil_url),'familial_situation':str(situation_familiale).replace('Enfants','').replace('Aucune','').strip(),'full_name':str(name_full),'born':str(naissance),'localisation':str(localisation),
+        "nb_enfants":str(nb_kids).strip(),"Job":str(job).strip(),'pdp':str(photo)
+        }
+        return text
+    except IndexError:
+        return None
diff --git a/modules/dirigeants_bfmtv.py b/modules/dirigeants_bfmtv.py
index 572193e..dfb556f 100644
--- a/modules/dirigeants_bfmtv.py
+++ b/modules/dirigeants_bfmtv.py
@@ -1,29 +1,29 @@
-import requests, bs4
-from bs4 import BeautifulSoup
-
-def bfmtv_search(name,pren):
- r = requests.get("https://dirigeants.bfmtv.com/recherche/q/{}5+{}6".format(name,pren))
- page = r.content
- features = "html.parser"
- soup = BeautifulSoup(page, features)
- try:
- full_name = soup.find('a',{'class':'nom'}).text+" "+soup.find('td',{'class':'verif_col2'}).text
- naissance = soup.find('td',{'class':'verif_col3'}).text.replace('Nรฉ le ','')
- mandats = soup.find('td',{'class':'verif_col5'}).text
- fonction = soup.find('td',{'class':'verif_col4'}).text
- link = soup.find('a',{'class':'nom'})
- link = str(link).replace('')[1]
- adresse = adresse_full.split("
")[0]
- cp = adresse_full.split("
")[1].split("")[0]
- text = {"addr":adresse+cp,'company':entreprise,'link':link,'full_name':full_name,'naissance':naissance,'mandats':mandats,'fonction':fonction}
- return text
- except AttributeError:
- return None
+import requests, bs4
+from bs4 import BeautifulSoup
+
+def bfmtv_search(name,pren):
+ r = requests.get("https://dirigeants.bfmtv.com/recherche/q/{}5+{}6".format(name,pren), timeout=60)
+ page = r.content
+ features = "html.parser"
+ soup = BeautifulSoup(page, features)
+ try:
+ full_name = soup.find('a',{'class':'nom'}).text+" "+soup.find('td',{'class':'verif_col2'}).text
+ naissance = soup.find('td',{'class':'verif_col3'}).text.replace('Nรฉ le ','')
+ mandats = soup.find('td',{'class':'verif_col5'}).text
+ fonction = soup.find('td',{'class':'verif_col4'}).text
+ link = soup.find('a',{'class':'nom'})
+ link = str(link).replace('')[1]
+ adresse = adresse_full.split("
")[0]
+ cp = adresse_full.split("
")[1].split("")[0]
+ text = {"addr":adresse+cp,'company':entreprise,'link':link,'full_name':full_name,'naissance':naissance,'mandats':mandats,'fonction':fonction}
+ return text
+ except AttributeError:
+ return None
diff --git a/modules/facebook_search.py b/modules/facebook_search.py
index 4fad6fe..1de92b7 100644
--- a/modules/facebook_search.py
+++ b/modules/facebook_search.py
@@ -1,22 +1,22 @@
-import requests, bs4, re
-from bs4 import BeautifulSoup
-
-def facebook_search(name,pren):
- url = "https://fr-fr.facebook.com/public/{}-{}".format(pren,name)
- page = requests.get(url).content.decode('utf-8')
- nameAccount = re.findall("width=\"72\" height=\"72\" alt=\"([a-zA-Z0-9_ รฉ , ]+)\" />", page)
- total_accounts = []
- for i in nameAccount:
- if name.lower() in i.lower() and pren.lower() in i.lower():
- total_accounts.append(i)
- else:
- pass
- if len(total_accounts) == 0:
- return None
- else:
- return total_accounts
-
-'''
-This code can be found at:
-https://github.com/lulz3xploit/LittleBrother/blob/master/core/facebookSearchTool.py
-'''
+import requests, bs4, re
+from bs4 import BeautifulSoup
+
+def facebook_search(name,pren):
+ url = "https://fr-fr.facebook.com/public/{}-{}".format(pren,name)
+ page = requests.get(url, timeout=60).content.decode('utf-8')
+ nameAccount = re.findall("width=\"72\" height=\"72\" alt=\"([a-zA-Z0-9_ รฉ , ]+)\" />", page)
+ total_accounts = []
+ for i in nameAccount:
+ if name.lower() in i.lower() and pren.lower() in i.lower():
+ total_accounts.append(i)
+ else:
+ pass
+ if len(total_accounts) == 0:
+ return None
+ else:
+ return total_accounts
+
+'''
+This code can be found at:
+https://github.com/lulz3xploit/LittleBrother/blob/master/core/facebookSearchTool.py
+'''
diff --git a/modules/instagram_search.py b/modules/instagram_search.py
index fc0dfd9..470f93f 100644
--- a/modules/instagram_search.py
+++ b/modules/instagram_search.py
@@ -1,268 +1,268 @@
-import requests, bs4
-from bs4 import BeautifulSoup
-
-import requests,bs4
-from bs4 import BeautifulSoup
-
-
-# EXCLUSIVE MODULE TO DAPROFILER
-
-def getInstagramEmailFromBio(username):
-    bios = []
-
-    url = "https://smihub.com/v/{}".format(username)
-
-    r = requests.get(url=url)
-    page = r.content.decode()
-    features = "html.parser"
-    soup = BeautifulSoup(page,features)
-
-    bioo = str(soup.find('div',{'class':'user__info-desc'}))
-
-    bioo = bioo.replace('')[1].replace('')[0].split('Instagram\'s posts" class="profile-name-link" href="')[1].split('">')[1])
-        profile_formated = ('{}\t| {}'.format(at_username,username))
-        if name.lower() in profile_formated.lower() and name.lower() in profile_formated.lower():
-            profiles.append(str(profile_formated))
-    return profiles
+import requests, bs4
+from bs4 import BeautifulSoup
+
+import requests,bs4
+from bs4 import BeautifulSoup
+
+
+# EXCLUSIVE MODULE TO DAPROFILER
+
+def getInstagramEmailFromBio(username):
+    bios = []
+
+    url = "https://smihub.com/v/{}".format(username)
+
+    r = requests.get(url=url, timeout=60)
+    page = r.content.decode()
+    features = "html.parser"
+    soup = BeautifulSoup(page,features)
+
+    bioo = str(soup.find('div',{'class':'user__info-desc'}))
+
+    bioo = bioo.replace('')[1].replace('')[0].split('Instagram\'s posts" class="profile-name-link" href="')[1].split('">')[1])
+        profile_formated = ('{}\t| {}'.format(at_username,username))
+        if name.lower() in profile_formated.lower() and name.lower() in profile_formated.lower():
+            profiles.append(str(profile_formated))
+    return profiles
diff --git a/modules/mail_check.py b/modules/mail_check.py
index 5ad4d02..02d2296 100644
--- a/modules/mail_check.py
+++ b/modules/mail_check.py
@@ -1,17 +1,17 @@
-# found at "https://docs.isitarealemail.com/how-to-validate-email-addresses-in-python"
-# modified (a little) by eupone
-import requests
-
-def verify(mail):
-    response = requests.get("https://isitarealemail.com/api/email/validate?email={}".format(mail),params = {'Authorization':'fa86a707-750e-485c-8ec3-86eddd7ec4d0'},headers = {'Authorization': "Bearer fa86a707-750e-485c-8ec3-86eddd7ec4d0"})
-    try:
-        data = response.json()
-        status = data['status']
-        if status == "valid":
-            return True
-        elif status == "invalid":
-            return None
-        else:
-            return None
-    except:
-        return None
+# found at "https://docs.isitarealemail.com/how-to-validate-email-addresses-in-python"
+# modified (a little) by eupone
+import requests
+
+def verify(mail):
+    response = requests.get("https://isitarealemail.com/api/email/validate?email={}".format(mail),params = {'Authorization':'fa86a707-750e-485c-8ec3-86eddd7ec4d0'},headers = {'Authorization': "Bearer fa86a707-750e-485c-8ec3-86eddd7ec4d0"}, timeout=60)
+    try:
+        data = response.json()
+        status = data['status']
+        if status == "valid":
+            return True
+        elif status == "invalid":
+            return None
+        else:
+            return None
+    except:
+        return None
diff --git a/modules/mail_gen.py b/modules/mail_gen.py
index 1c093b0..b64fed4 100644
--- a/modules/mail_gen.py
+++ b/modules/mail_gen.py
@@ -1,60 +1,60 @@
-import threading, requests, bs4
-from bs4 import BeautifulSoup
-from modules import mail_check
-
-def check(name,pren):
-    results = [
-        "{}.{}@gmail.com".format(name,pren),
-        "{}.{}@yahoo.com".format(name,pren),
-        "{}{}@yahoo.com".format(name,pren),
-        "{}{}@yahoo.fr".format(name,pren),
-        "{}.{}@aol.com".format(name,pren),
-        "{}{}@aol.com".format(name,pren),
-        "{}.{}@hotmail.com".format(name,pren),
-        "{}{}@hotmail.com".format(name,pren),
-        "{}{}@hotmail.fr".format(name,pren),
-        "{}{}@outlook.fr".format(name,pren),
-        "{}.{}@outlook.com".format(name,pren),
-        "{}{}@outlook.com".format(name,pren),
-    ]
-    valid_mails = []
-    for i in results:
-        a = mail_check.verify(mail=i)
-        if a is not None:
-            valid_mails.append(i)
-    return valid_mails
-
-def skype2email(name,pren):
-    url = f"https://www.skypli.com/search/{name} {pren}"
-    r = requests.get(url)
-    page = r.content
-    features = "html.parser"
-    soup = BeautifulSoup(page, features)
-
-    profiles = soup.find_all('span',{'class':'search-results__block-info-username'})[0:5]
-
-    profiless = []
-
-    for i in profiles:
-        if "live:." in i.text:
-            pass
-        else:
-            profiless.append(i.text.replace('live:','').replace('_1',''))
-
-    valid_emails = []
-
-    for i in profiless:
-        emails = [
-            i+"@aol.com",
-            i+"@yahoo.com",
-            i+"@gmail.com",
-            i+"@hotmail.com",
-            i+"@hotmail.fr",
-            i+"@outlook.fr",
-            i+"@outlook.com"
-        ]
-        for i in emails:
-            a = mail_check.verify(mail=i)
-            if a is not None:
-                valid_emails.append(i)
-    return valid_emails
+import threading, requests, bs4
+from bs4 import BeautifulSoup
+from modules import mail_check
+
+def check(name,pren):
+    results = [
+        "{}.{}@gmail.com".format(name,pren),
+        "{}.{}@yahoo.com".format(name,pren),
+        "{}{}@yahoo.com".format(name,pren),
+        "{}{}@yahoo.fr".format(name,pren),
+        "{}.{}@aol.com".format(name,pren),
+        "{}{}@aol.com".format(name,pren),
+        "{}.{}@hotmail.com".format(name,pren),
+        "{}{}@hotmail.com".format(name,pren),
+        "{}{}@hotmail.fr".format(name,pren),
+        "{}{}@outlook.fr".format(name,pren),
+        "{}.{}@outlook.com".format(name,pren),
+        "{}{}@outlook.com".format(name,pren),
+    ]
+    valid_mails = []
+    for i in results:
+        a = mail_check.verify(mail=i)
+        if a is not None:
+            valid_mails.append(i)
+    return valid_mails
+
+def skype2email(name,pren):
+    url = f"https://www.skypli.com/search/{name} {pren}"
+    r = requests.get(url, timeout=60)
+    page = r.content
+    features = "html.parser"
+    soup = BeautifulSoup(page, features)
+
+    profiles = soup.find_all('span',{'class':'search-results__block-info-username'})[0:5]
+
+    profiless = []
+
+    for i in profiles:
+        if "live:." in i.text:
+            pass
+        else:
+            profiless.append(i.text.replace('live:','').replace('_1',''))
+
+    valid_emails = []
+
+    for i in profiless:
+        emails = [
+            i+"@aol.com",
+            i+"@yahoo.com",
+            i+"@gmail.com",
+            i+"@hotmail.com",
+            i+"@hotmail.fr",
+            i+"@outlook.fr",
+            i+"@outlook.com"
+        ]
+        for i in emails:
+            a = mail_check.verify(mail=i)
+            if a is not None:
+                valid_emails.append(i)
+    return valid_emails
diff --git a/modules/pagesblanches_search.py b/modules/pagesblanches_search.py
index a5cddf7..97b1845 100644
--- a/modules/pagesblanches_search.py
+++ b/modules/pagesblanches_search.py
@@ -1,40 +1,40 @@
-import requests, bs4, colorama
-from colorama import Fore
-from bs4 import BeautifulSoup
-
-def adresse_search(name,pren):
-    r = requests.get('https://www.pagesjaunes.fr/pagesblanches/recherche?quoiqui={} {}'.format(name,pren))
-    page = r.content
-    features = "html.parser"
-    soup = BeautifulSoup(page, features)
-
-    target_name = soup.find("a", {"class": "denomination-links pj-lb pj-link"})
-    target_addr = soup.find("a", {"class": "adresse pj-lb pj-link"})
-    target_phon = soup.find('strong',{'class':'num'})
-
-    try:
-        name_full = (target_name.text.strip())
-        addr_full = (target_addr.text.replace(', voir sur la carte','').replace('\n',' ').strip())
-        phon_full = (target_phon.text.strip())
-
-        if name.lower() in name_full.lower():
-            try:
-                r = requests.get('https://www.infos-numero.com/ajax/NumberInfo?num={}'.format(phon_full))
-                data = r.json()
-
-                type_tel = (data['info']['type'])
-                if type_tel == "FIXED_LINE":
-                    type_tel = "Fixe"
-                carrier = (data['info']['carrier'])
-                if len(carrier) <= 1:
-                    carrier = 0
-                    carrier = None
-                localisation = (data['info']['ville'])
-                text = {'Phone':phon_full,'Name':name_full,'Adress':addr_full,'Type_tel':type_tel,"Loc_phone":localisation,'carrier':carrier}
-                return text
-            except:
-                return {'Phone':phon_full,'Name':name_full,'Adress':addr_full,'Type_tel':None,"Loc_phone":None,'carrier':None}
-        else:
-            return None
-    except AttributeError:
-        return None
+import requests, bs4, colorama
+from colorama import Fore
+from bs4 import BeautifulSoup
+
+def adresse_search(name,pren):
+    r = requests.get('https://www.pagesjaunes.fr/pagesblanches/recherche?quoiqui={} {}'.format(name,pren), timeout=60)
+    page = r.content
+    features = "html.parser"
+    soup = BeautifulSoup(page, features)
+
+    target_name = soup.find("a", {"class": "denomination-links pj-lb pj-link"})
+    target_addr = soup.find("a", {"class": "adresse pj-lb pj-link"})
+    target_phon = soup.find('strong',{'class':'num'})
+
+    try:
+        name_full = (target_name.text.strip())
+        addr_full = (target_addr.text.replace(', voir sur la carte','').replace('\n',' ').strip())
+        phon_full = (target_phon.text.strip())
+
+        if name.lower() in name_full.lower():
+            try:
+                r = requests.get('https://www.infos-numero.com/ajax/NumberInfo?num={}'.format(phon_full), timeout=60)
+                data = r.json()
+
+                type_tel = (data['info']['type'])
+                if type_tel == "FIXED_LINE":
+                    type_tel = "Fixe"
+                carrier = (data['info']['carrier'])
+                if len(carrier) <= 1:
+                    carrier = 0
+                    carrier = None
+                localisation = (data['info']['ville'])
+                text = {'Phone':phon_full,'Name':name_full,'Adress':addr_full,'Type_tel':type_tel,"Loc_phone":localisation,'carrier':carrier}
+                return text
+            except:
+                return {'Phone':phon_full,'Name':name_full,'Adress':addr_full,'Type_tel':None,"Loc_phone":None,'carrier':None}
+        else:
+            return None
+    except AttributeError:
+        return None
diff --git a/modules/scylla_sh.py b/modules/scylla_sh.py
index 5406ccb..77457a0 100644
--- a/modules/scylla_sh.py
+++ b/modules/scylla_sh.py
@@ -1,31 +1,31 @@
-import requests
-
-def scylla_search(email):
-    try:
-        r = requests.get('https://scylla.so/search?q=email:{}'.format(email))
-        if r.status_code == 500 or r.status_code == 502:
-            return None
-        try:
-            response = r.json()
-            if len(response) == 0:
-                return None
-            else:
-                total = []
-                for i in response[0:10]:
-                    leak_name = i['fields']['domain']
-                    try:
-                        password = i['fields']['password']
-                    except:
-                        password = i['fields']['passhash']
-                    text = {
-                        'Name':leak_name,
-                        'Password':password
-                    }
-                    total.append(text)
-                return total
-        except:
-            return None
-    except requests.exceptions.ConnectionError:
-        return None
-
-# By Lui#6166 from Prism Intelligence Group
+import requests
+
+def scylla_search(email):
+    try:
+        r = requests.get('https://scylla.so/search?q=email:{}'.format(email), timeout=60)
+        if r.status_code == 500 or r.status_code == 502:
+            return None
+        try:
+            response = r.json()
+            if len(response) == 0:
+                return None
+            else:
+                total = []
+                for i in response[0:10]:
+                    leak_name = i['fields']['domain']
+                    try:
+                        password = i['fields']['password']
+                    except:
+                        password = i['fields']['passhash']
+                    text = {
+                        'Name':leak_name,
+                        'Password':password
+                    }
+                    total.append(text)
+                return total
+        except:
+            return None
+    except requests.exceptions.ConnectionError:
+        return None
+
+# By Lui#6166 from Prism Intelligence Group
diff --git a/modules/skype_search.py b/modules/skype_search.py
index 7938d96..51971e2 100644
--- a/modules/skype_search.py
+++ b/modules/skype_search.py
@@ -1,31 +1,31 @@
-import requests, bs4
-from bs4 import BeautifulSoup
-
-from modules import mail_check
-
-def skype_searchh(name,pren):
-    url = f"https://www.skypli.com/search/{name} {pren}"
-    r = requests.get(url)
-    page = r.content
-    features = "html.parser"
-    soup = BeautifulSoup(page, features)
-
-    profiles = soup.find_all('span',{'class':'search-results__block-info-username'})[0:5]
-
-    profiless = []
-
-    for i in profiles:
-        profiless.append(i.text)
-
-    profile_dict = []
-
-    for i in profiless:
-        r = requests.get('https://www.skypli.com/profile/{}'.format(i))
-        page = r.content
-        features = "html.parser"
-        soup = BeautifulSoup(page, features)
-        name = soup.find_all('div',{'class':'profile-box__table-value'})[1]
-        full_name = (name.text.strip())
-
-        profile_dict.append('{} \t| {}'.format(i,full_name,))
-    return profile_dict
+import requests, bs4
+from bs4 import BeautifulSoup
+
+from modules import mail_check
+
+def skype_searchh(name,pren):
+    url = f"https://www.skypli.com/search/{name} {pren}"
+    r = requests.get(url, timeout=60)
+    page = r.content
+    features = "html.parser"
+    soup = BeautifulSoup(page, features)
+
+    profiles = soup.find_all('span',{'class':'search-results__block-info-username'})[0:5]
+
+    profiless = []
+
+    for i in profiles:
+        profiless.append(i.text)
+
+    profile_dict = []
+
+    for i in profiless:
+        r = requests.get('https://www.skypli.com/profile/{}'.format(i), timeout=60)
+        page = r.content
+        features = "html.parser"
+        soup = BeautifulSoup(page, features)
+        name = soup.find_all('div',{'class':'profile-box__table-value'})[1]
+        full_name = (name.text.strip())
+
+        profile_dict.append('{} \t| {}'.format(i,full_name,))
+    return profile_dict