GHunt v2 is RELEASED BOYSSS

This commit is contained in:
mxrch
2022-12-03 20:50:37 +01:00
parent 5a20e78198
commit e6b0f876e7
96 changed files with 6624 additions and 3123 deletions

57 .github/stale.yml vendored

@@ -1,57 +0,0 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 21
# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- pinned
- security
- bug
- keep
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
pulls:
daysUntilStale: 60
markComment: >
This pull request has been automatically marked as stale because it has not had
activity in the last 60 days. It will be closed in 7 days if no further activity occurs. Thank you
for your contributions.
issues:
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity in the last 18 days. It will be closed in 6 days if no further activity occurs.


@@ -1,71 +0,0 @@
# Don't ask me what any of this means; this file was generated with GitHub's web GUI
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
name: "CodeQL"
on:
push:
branches: [master]
pull_request:
# The branches below must be a subset of the branches above
branches: [master]
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
# Override automatic language detection by changing the below list
# Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
language: ['python']
# Learn more...
# https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection
steps:
- name: Checkout repository
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1


@@ -1,34 +0,0 @@
name: 'Build & Push Image'
on:
push:
branches:
- master
pull_request:
jobs:
build:
name: 'Build'
runs-on: ubuntu-latest
env:
IMAGE_NAME: ghunt
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GHCR_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ghcr.io/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}:latest

24 .github/workflows/sponsors.yml vendored Normal file

@@ -0,0 +1,24 @@
name: Generate Sponsors README
on:
workflow_dispatch:
schedule:
- cron: '30 15 * * 0-6'
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2
- name: Generate Sponsors 💖
uses: JamesIves/github-sponsors-readme-action@v1
with:
token: ${{ secrets.PAT }}
file: 'README.md'
template: '<a href="https://github.com/{{{ login }}}"><img src="https://github.com/{{{ login }}}.png" width="50px" alt="{{{ login }}}" /></a>&nbsp;&nbsp;'
- name: Deploy to GitHub Pages 🚀
uses: JamesIves/github-pages-deploy-action@v4
with:
branch: master
folder: '.'

16 .gitignore vendored

@@ -1,13 +1,5 @@
__pycache__/
lib/__pycache__/
modules/__pycache__/
profile_pics/*.jpg
resources/
chromedriver
chromedriver.exe
data.txt
login.py
.DS_Store
debug.log
*.env
.vscode
build/
dist/
__pycache__/
ghunt.egg-info/


@@ -1,27 +0,0 @@
FROM python:3.8.6-slim-buster
ARG UID=1000
ARG GID=1000
WORKDIR /usr/src/app
RUN groupadd -o -g ${GID} -r app && adduser --system --home /home/app --ingroup app --uid ${UID} app && \
chown -R app:app /usr/src/app && \
apt-get update && \
apt-get install -y curl unzip gnupg && \
curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list && \
apt-get update && \
apt-get install -y google-chrome-stable && \
rm -rf /var/lib/apt/lists/*
COPY --chown=app:app requirements.txt docker/download_chromedriver.py ./
RUN python3 -m pip install --no-cache-dir -r requirements.txt && \
python3 download_chromedriver.py && chown -R app:app /usr/src/app
COPY --chown=app:app . .
USER app
ENTRYPOINT [ "python3" ]
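The image above installs Chrome, then runs `docker/download_chromedriver.py`, a script not shown in this diff. As a rough illustration only, a fetcher that matches a chromedriver to the installed Chrome in that era could look like the sketch below; it assumes the pre-Chrome-115 release endpoints at `chromedriver.storage.googleapis.com` and is not the project's actual script.

```python
# Hypothetical sketch of a chromedriver fetcher like the
# docker/download_chromedriver.py referenced above (the real script is
# not shown in this diff). Assumes the pre-Chrome-115 release endpoints
# at chromedriver.storage.googleapis.com used in this image's era.
import io
import os
import subprocess
import urllib.request
import zipfile

def installed_chrome_major() -> str:
    # "Google Chrome 107.0.5304.87" -> "107"
    out = subprocess.check_output(["google-chrome", "--version"], text=True)
    return out.strip().split()[-1].split(".")[0]

def download_chromedriver(dest_dir: str = ".") -> None:
    major = installed_chrome_major()
    base = "https://chromedriver.storage.googleapis.com"
    # Resolve the latest chromedriver release matching the Chrome major version.
    version = urllib.request.urlopen(f"{base}/LATEST_RELEASE_{major}").read().decode().strip()
    with urllib.request.urlopen(f"{base}/{version}/chromedriver_linux64.zip") as resp:
        zipfile.ZipFile(io.BytesIO(resp.read())).extract("chromedriver", dest_dir)
    os.chmod(os.path.join(dest_dir, "chromedriver"), 0o755)  # make executable

if __name__ == "__main__":
    download_chromedriver()
```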

373 LICENSE

@@ -1,373 +0,0 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

654 LICENSE.md Normal file

@@ -0,0 +1,654 @@
### For easier reading: https://choosealicense.com/licenses/agpl-3.0/
\
GNU Affero General Public License
=================================
_Version 3, 19 November 2007_
_Copyright © 2007 Free Software Foundation, Inc. &lt;<http://fsf.org/>&gt;_
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
## Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: **(1)** assert copyright on the software, and **(2)** offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
## TERMS AND CONDITIONS
### 0. Definitions
“This License” refers to version 3 of the GNU Affero General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this
License. Each licensee is addressed as “you”. “Licensees” and
“recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a “modified version” of the
earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based
on the Program.
To “propagate” a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices”
to the extent that it includes a convenient and prominently visible
feature that **(1)** displays an appropriate copyright notice, and **(2)**
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
### 1. Source Code
The “source code” for a work means the preferred form of the work
for making modifications to it. “Object code” means any non-source
form of a work.
A “Standard Interface” means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other
than the work as a whole, that **(a)** is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and **(b)** serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
“Major Component”, in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
### 2. Basic Permissions
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
### 3. Protecting Users' Legal Rights From Anti-Circumvention Law
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
### 4. Conveying Verbatim Copies
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
### 5. Conveying Modified Source Versions
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
* **a)** The work must carry prominent notices stating that you modified
it, and giving a relevant date.
* **b)** The work must carry prominent notices stating that it is
released under this License and any conditions added under section 7.
This requirement modifies the requirement in section 4 to
“keep intact all notices”.
* **c)** You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
* **d)** If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
“aggregate” if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
### 6. Conveying Non-Source Forms
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
* **a)** Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
* **b)** Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either **(1)** a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or **(2)** access to copy the
Corresponding Source from a network server at no charge.
* **c)** Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
* **d)** Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
* **e)** Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A “User Product” is either **(1)** a “consumer product”, which means any
tangible personal property which is normally used for personal, family,
or household purposes, or **(2)** anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, “normally used” refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
“Installation Information” for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
### 7. Additional Terms
“Additional permissions” are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
* **a)** Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
* **b)** Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
* **c)** Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
* **d)** Limiting the use for publicity purposes of names of licensors or
authors of the material; or
* **e)** Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
* **f)** Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered “further
restrictions” within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
### 8. Termination
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated **(a)**
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and **(b)** permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
### 9. Acceptance Not Required for Having Copies
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
### 10. Automatic Licensing of Downstream Recipients
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
### 11. Patents
A “contributor” is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, “control” includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To “grant” such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either **(1)** cause the Corresponding Source to be so
available, or **(2)** arrange to deprive yourself of the benefit of the
patent license for this particular work, or **(3)** arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. “Knowingly relying” means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is “discriminatory” if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license **(a)** in connection with copies of the covered work
conveyed by you (or copies made from those copies), or **(b)** primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
### 12. No Surrender of Others' Freedom
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
### 13. Remote Network Interaction; Use with the GNU General Public License
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
### 14. Revised Versions of this License
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License “or any later version” applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
### 15. Disclaimer of Warranty
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
### 16. Limitation of Liability
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
### 17. Interpretation of Sections 15 and 16
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
_END OF TERMS AND CONDITIONS_
## How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the “copyright” line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a “Source” link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a “copyright disclaimer” for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
&lt;<http://www.gnu.org/licenses/>&gt;.

241 README.md

@@ -1,188 +1,115 @@
![screenshot](https://files.catbox.moe/8a5nzs.png)
![](assets/long_banner.png)
![Python minimum version](https://img.shields.io/badge/Python-3.8%2B-brightgreen)
<br>
![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/mxrch/ghunt) ![GitHub Workflow Status](https://img.shields.io/github/workflow/status/mxrch/ghunt/CodeQL?label=CodeQL)
# Description
GHunt is a modular OSINT tool designed to evolve over the years, incorporating many techniques to investigate Google accounts or objects.\
It currently has **email**, **document**, **youtube** and **gaia** modules.
![Python minimum version](https://img.shields.io/badge/Python-3.10%2B-brightgreen)
🔥 **_GHunt is being completely refactored_** to allow use as a Python library, remove the Selenium and Google Chrome dependencies, and adopt type definitions and async, in preparation for v2.\
You can track the progress of this project here: https://github.com/mxrch/GHunt/projects/1 \
And on this branch: https://github.com/mxrch/GHunt/tree/refactor \
Please understand that activity on the master branch will be reduced from now on, and pull requests along with it.
# 😊 Description
## What can GHunt find?
GHunt (v2) is an offensive Google framework, designed to evolve efficiently.\
It's currently focused on OSINT, but any use related to Google is possible.
🗺️ **Email** module:
- Owner's name
- Gaia ID
- Last time the profile was edited
- Profile picture (+ detect custom picture)
- If the account is a Hangouts Bot
- Activated Google services (YouTube, Photos, Maps, News360, Hangouts, etc.)
- Possible YouTube channel
- Possible other usernames
- Google Maps reviews (M)
- Possible physical location (M)
- Events from Google Calendar (C)
- Organizations (work & education) (A)
- Contact emails (A)
- Contact phones (A)
- Addresses (A)
- ~~Public photos (P)~~
- ~~Phones models (P)~~
- ~~Phones firmwares (P)~~
- ~~Installed softwares (P)~~
Features:
- CLI usage and modules
- Python library usage
- Fully async
- JSON export
- Browser extension to ease login
🗺️ **Document** module:
- Owner's name
- Owner's Gaia ID
- Owner's profile picture (+ detect custom picture)
- Creation date
- Last time the document was edited
- Public permissions
- Your permissions
# ✔️ Requirements
- Python >= 3.10
🗺️ **Youtube** module:
- Owner's Gaia ID (through Wayback Machine)
- Detect if the email is visible
- Country
- Description
- Total views
- Joined date
- Primary links (social networks)
- All infos accessible by the Gaia module
# ⚙️ Installation
🗺️ **Gaia** module:
- Owner's name
- Profile picture (+ detect custom picture)
- Possible YouTube channel
- Possible other usernames
- Google Maps reviews (M)
- Possible physical location (M)
- Organizations (work & education) (A)
- Contact emails (A)
- Contact phones (A)
- Addresses (A)
The features marked with a **(P)** require the target account to still have the default setting `Allow the people you share content with to download your photos and videos` on the Google AlbumArchive, or the target to have ever used Picasa linked to their Google account.\
More info [here](https://github.com/mxrch/GHunt#%EF%B8%8F-protecting-yourself).
Those marked with a **(M)** require the target's Google Maps reviews to be public (they are by default).
Those marked with a **(C)** require the target to have set their Google Calendar to public (it is private by default).
Those marked with an **(A)** require the target to have filled in the additional info [on their profile](https://myaccount.google.com/profile) with the "Anyone" privacy option enabled.
# Screenshots
<p align="center">
<img src="https://files.catbox.moe/2zb1z9.png">
</p>
## 📰 Latest news
- **02/10/2020**: For a few days now, Google has been returning a 404 when we try to access someone's public Google Photos albums; we can only access them if we have a link to one of their albums.\
Either this is a bug that will be fixed, or it's a protection we need to find a way to bypass.
- **03/10/2020**: Successfully bypassed. 🕺 (commit 01dc016)\
It requires the "Profile photos" album to be public (it is by default).
- **20/10/2020**: Google WebArchive now returns a 404 even when coming from the "Profile photos" album, so **the photo scraping is temporarily (or permanently) disabled.** (commit e762543)
- **25/11/2020**: Google now removes the name from the Google Maps profile if the user has 0 reviews (or contributions, even private). I haven't found a bypass yet, so **any help in the search for a bypass is appreciated.**
- **20/03/2021**: Successfully bypassed. 🕺 (commit b3b01bc)
# Installation
## Manual installation
- Make sure you have Python 3.8+ installed. (I developed it with Python 3.8.1.)
- Some Python modules are required; they are listed in `requirements.txt` and will be installed below.
### 1. Chromedriver & Google Chrome
This project uses Selenium and automatically downloads the correct driver for your Chrome version. \
⚠️ So just make sure to have Google Chrome installed.
### 2. Cloning
Open your terminal and execute the following commands:
```bash
git clone https://github.com/mxrch/ghunt
cd ghunt
$ pip3 install pipx
$ pipx ensurepath
$ pipx install ghunt
```
It will automatically use venvs to avoid dependency conflicts with other projects.
### 3. Requirements
In the GHunt folder, run:
# 💃 Usage
## Login
First, launch the listener with `ghunt login` and choose one of the first 2 methods:
```bash
python3 -m pip install -r requirements.txt
```
Adapt the command to your operating system if needed.
$ ghunt login
## Docker
The Docker image is automatically built and pushed to Docker Hub after each push to this repo.\
You can pull the Docker image with:
[1] (Companion) Put GHunt on listening mode (currently not compatible with docker)
[2] (Companion) Paste base64-encoded cookies
[3] Manually enter all cookies
```
docker pull ghcr.io/mxrch/ghunt
Choice =>
```
Then, you can use the `docker_check_and_gen.sh` and `docker_hunt.sh` scripts to invoke GHunt through Docker, or use these commands:
Then, use GHunt Companion to complete the login.
```
docker run -v ghunt-resources:/usr/src/app/resources -ti ghcr.io/mxrch/ghunt check_and_gen.py
docker run -v ghunt-resources:/usr/src/app/resources -ti ghcr.io/mxrch/ghunt ghunt.py
```
# Usage
On the first run, and from time to time afterwards, you'll need to check the validity of your cookies.\
To do this, run `check_and_gen.py`. \
If you don't have cookies stored (e.g., on first launch), you will be asked for the required cookies. If they are valid, it will generate the authentication token and the Google Docs & Hangouts tokens.
Then, you can run the tool like this:
```bash
python3 ghunt.py email larry@google.com
```
```bash
python3 ghunt.py doc https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms
```
⚠️ I suggest you make an empty account just for this, or use an account you never log in to, because depending on your browser/location, logging back into the Google account used for the cookies can deauthorize them.
# Where do I get these cookies?
## Auto (faster)
You can download the GHunt Companion extension, which automates the cookie extraction in one click!\
The extension is available on the following stores:\
\
[![Firefox](https://ffp4g1ylyit3jdyti1hqcvtb-wpengine.netdna-ssl.com/addons/files/2015/11/get-the-addon.png)](https://addons.mozilla.org/fr/firefox/addon/ghunt-companion/)&nbsp;&nbsp;&nbsp;[![Chrome](https://storage.googleapis.com/web-dev-uploads/image/WlD8wC6g8khYWPJUsQceQkhXSlv1/UV4C4ybeBTsZt43U4xis.png)](https://chrome.google.com/webstore/detail/ghunt-companion/dpdcofblfbmmnikcbmmiakkclocadjab)&nbsp;&nbsp;&nbsp;[![Edge](https://user-images.githubusercontent.com/11660256/111323589-4f4c7c00-866a-11eb-80ff-da7de777d7c0.png)](https://microsoftedge.microsoft.com/addons/detail/ghunt-companion/jhgmpcigklnbjglpipnbnjhdncoihhdj)
[![Firefox](https://files.catbox.moe/5g2ld5.png)](https://addons.mozilla.org/fr/firefox/addon/ghunt-companion/)&nbsp;&nbsp;&nbsp;[![Chrome](https://storage.googleapis.com/web-dev-uploads/image/WlD8wC6g8khYWPJUsQceQkhXSlv1/UV4C4ybeBTsZt43U4xis.png)](https://chrome.google.com/webstore/detail/ghunt-companion/dpdcofblfbmmnikcbmmiakkclocadjab)
Just launch check_and_gen.py and choose the extraction mode you want to use: either put GHunt in listening mode, or copy/paste the base64-encoded cookies.
## Modules
## Manual
1. Be logged in to myaccount.google.com
2. After that, open the Dev Tools window and navigate to the Network tab\
If you don't know how to open it, just right-click anywhere and click "Inspect Element".
3. Go to myaccount.google.com, and in the browser requests, select the GET on "accounts.google.com" that gives a 302 redirect
4. Then you'll find every cookie you need in the "cookies" section.
Then, profit:
```bash
usage: ghunt [-h] {login,email,gaia,drive} ...
![cookies](https://files.catbox.moe/15j8pg.png)
positional arguments:
{login,email,gaia,drive}
login (--clean) Authenticate GHunt to Google.
email (--json) Get information on an email address.
gaia (--json) Get information on a Gaia ID.
drive (--json) Get information on a Drive file or folder.
# 🛡️ Protecting yourself
Regarding the collection of metadata from your Google Photos account:
options:
-h, --help show this help message and exit
```
Given that Google shows **"X require access"** on [your Google Account Dashboard](https://myaccount.google.com/intro/dashboard), you might imagine that you have to explicitly authorize another account for it to access your pictures, but this is not the case.\
Any account can access your AlbumArchive (by default):
📄 You can also use --json with the email, gaia and drive modules to export to JSON! Example:
![account-dashboard](https://files.catbox.moe/ufqc9g.jpg)
```bash
$ ghunt email <email_address> --json user_data.json
```
Here's how to check whether you're vulnerable (which you most likely are) and fix it:\
Go to https://get.google.com/albumarchive/ while logged in with your Google account. You will be **automatically** redirected to your own album archive URL (`https://get.google.com/albumarchive/YOUR-GOOGLE-ID-HERE`). After that, click the three dots in the top-left corner, then click **Settings**.
**Have fun 🥰💞**
![three-dots-setting](https://files.catbox.moe/ru6kci.jpg)
# 🧑‍💻 Developers
Then, uncheck the only option there:
📕 I started writing some docs [here](https://github.com/mxrch/GHunt/wiki) and examples [here](https://github.com/mxrch/GHunt/tree/master/examples), feel free to contribute!
![setting](https://files.catbox.moe/b8879l.jpg)
To use GHunt as a library, you can't rely on pipx, because it installs into its own venv.\
So you should install GHunt with pip:
```bash
$ pip3 install ghunt
```
And now, you should be able to `import ghunt` in your projects!\
You can play with the [examples](https://github.com/mxrch/GHunt/tree/master/examples) right away.
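As a quick sanity check, here is a minimal sketch adapted from the `is_email_registered` example shipped in this commit (the email address is just a placeholder):

```python
import httpx
import trio

from ghunt.helpers.gmail import is_email_registered

async def main():
    # GHunt helpers take an async HTTP client as their first argument
    as_client = httpx.AsyncClient()
    print(await is_email_registered(as_client, "larry@google.com"))

trio.run(main)  # run the async code from a sync context
```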
On another note, the target account will **also** be vulnerable if they have ever used **Picasa** linked to their Google account in any way, shape or form. For more details on this, read ItsIgnacioPortal's comment on [issue #10](https://github.com/mxrch/GHunt/issues/10).\
For now, the only (known) solution to this is to delete the Picasa albums from your AlbumArchive.
# 📮 Details
# Thanks
This tool is based on [Sector's research on Google IDs](https://sector035.nl/articles/getting-a-grasp-on-google-ids) and completed by my own research as well.\
If I find the motivation to write a blog post about it, I'll add the link here!
- Palenath (for the name bypass)
## Obvious disclaimer
This tool is for educational purposes only; I am not responsible for its use.
## Less obvious disclaimer
This project is under [AGPL Licence](https://choosealicense.com/licenses/agpl-3.0/), and you have to respect it.\
**Use it only in personal or criminal investigations, pentesting, or open-source projects.**
## Thanks
- [novitae](https://github.com/novitae) for being my Python colleague
- All the people on [Malfrats Industries](https://discord.gg/sg2YcrC6x9) and elsewhere for the beta test!
- The HideAndSec team 💗 (blog: https://hideandsec.sh)
## Sponsors
Thanks to these awesome people for supporting me!
<!-- sponsors --><a href="https://github.com/elweth-sec"><img src="https://github.com/elweth-sec.png" width="50px" alt="elweth-sec" /></a>&nbsp;&nbsp;<a href="https://github.com/BlWasp"><img src="https://github.com/BlWasp.png" width="50px" alt="BlWasp" /></a>&nbsp;&nbsp;<a href="https://github.com/pl4nty"><img src="https://github.com/pl4nty.png" width="50px" alt="pl4nty" /></a>&nbsp;&nbsp;<a href="https://github.com/0xN0x"><img src="https://github.com/0xN0x.png" width="50px" alt="0xN0x" /></a>&nbsp;&nbsp;<a href="https://github.com/C3n7ral051nt4g3ncy"><img src="https://github.com/C3n7ral051nt4g3ncy.png" width="50px" alt="C3n7ral051nt4g3ncy" /></a>&nbsp;&nbsp;<a href="https://github.com/rayanlecat"><img src="https://github.com/rayanlecat.png" width="50px" alt="rayanlecat" /></a>&nbsp;&nbsp;<a href="https://github.com/ajmeese7"><img src="https://github.com/ajmeese7.png" width="50px" alt="ajmeese7" /></a>&nbsp;&nbsp;<!-- sponsors -->
\
You like my work?\
[Sponsor me](https://github.com/sponsors/mxrch) on GitHub! 🤗
BIN
assets/long_banner.png Normal file
View File
Binary file not shown. (55 KiB)
View File
@@ -1,256 +0,0 @@
#!/usr/bin/env python3
from lib import modwall; modwall.check() # We check the requirements
import json
from time import time
import os
from os.path import isfile
from pathlib import Path
from ssl import SSLError
import base64
from copy import deepcopy
import httpx
from seleniumwire import webdriver
from selenium.common.exceptions import TimeoutException as SE_TimeoutExepction
from bs4 import BeautifulSoup as bs
import config
from lib.utils import *
from lib import listener
# We change the current working directory to allow using GHunt from anywhere
os.chdir(Path(__file__).parents[0])
def get_saved_cookies():
''' Return the cookie cache if it exists '''
if isfile(config.data_path):
try:
with open(config.data_path, 'r') as f:
out = json.loads(f.read())
cookies = out["cookies"]
print("[+] Detected stored cookies, checking it")
return cookies
except Exception:
print("[-] Stored cookies are corrupted\n")
return False
print("[-] No stored cookies found\n")
return False
def get_authorization_source(driver):
''' Return the HTML source of the Docs page
if the user is authorized
'''
driver.get('https://docs.google.com/document/u/0/')
if "myaccount.google.com" in driver.page_source:
return driver.page_source
return None
def save_tokens(gdoc_token, chat_key, chat_auth, internal_token, internal_auth, cac_key, cookies, osid):
''' Save tokens to file '''
output = {
"chat_auth": chat_auth, "internal_auth": internal_auth,
"keys": {"gdoc": gdoc_token, "chat": chat_key, "internal": internal_token, "clientauthconfig": cac_key},
"cookies": cookies,
"osids": {
"cloudconsole": osid
}
}
with open(config.data_path, 'w') as f:
f.write(json.dumps(output))
def get_chat_tokens(cookies):
""" Return the API key used by Google Chat for
Internal People API and a generated SAPISID hash."""
chat_key = "AIzaSyB0RaagJhe9JF2mKDpMml645yslHfLI8iA"
chat_auth = f"SAPISIDHASH {gen_sapisidhash(cookies['SAPISID'], 'https://chat.google.com')}"
return (chat_key, chat_auth)
def get_people_tokens(cookies):
""" Return the API key used by Drive for
Internal People API and a generated SAPISID hash."""
people_key = "AIzaSyAy9VVXHSpS2IJpptzYtGbLP3-3_l0aBk4"
people_auth = f"SAPISIDHASH {gen_sapisidhash(cookies['SAPISID'], 'https://drive.google.com')}"
return (people_key, people_auth)
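# gen_sapisidhash comes from lib.utils and isn't shown in this diff; a minimal
# sketch of the publicly documented SAPISIDHASH scheme (SHA-1 over the current
# timestamp, the SAPISID cookie and the requesting origin) would look like:
#
#   import hashlib, time
#   def gen_sapisidhash(sapisid: str, origin: str) -> str:
#       ts = str(int(time.time()))
#       digest = hashlib.sha1(" ".join((ts, sapisid, origin)).encode()).hexdigest()
#       return f"{ts}_{digest}"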
def gen_osid(cookies, domain, service):
req = httpx.get(f"https://accounts.google.com/ServiceLogin?service={service}&osid=1&continue=https://{domain}/&followup=https://{domain}/&authuser=0",
cookies=cookies, headers=config.headers)
body = bs(req.text, 'html.parser')
params = {x.attrs["name"]:x.attrs["value"] for x in body.find_all("input", {"type":"hidden"})}
headers = {**config.headers, **{"Content-Type": "application/x-www-form-urlencoded"}}
req = httpx.post(f"https://{domain}/accounts/SetOSID", cookies=cookies, data=params, headers=headers)
osid_header = [x for x in req.headers["set-cookie"].split(", ") if x.startswith("OSID")]
if not osid_header:
exit("[-] No OSID header detected, exiting...")
osid = osid_header[0].split("OSID=")[1].split(";")[0]
return osid
def get_clientauthconfig_key(cookies):
""" Extract the Client Auth Config API token."""
req = httpx.get("https://console.cloud.google.com",
cookies=cookies, headers=config.headers)
if req.status_code == 200 and "pantheon_apiKey" in req.text:
cac_key = req.text.split('pantheon_apiKey\\x22:')[1].split(",")[0].strip('\\x22')
return cac_key
exit("[-] I can't find the Client Auth Config API...")
def check_cookies(cookies):
wanted = ["authuser", "continue", "osidt", "ifkv"]
req = httpx.get(f"https://accounts.google.com/ServiceLogin?service=cloudconsole&osid=1&continue=https://console.cloud.google.com/&followup=https://console.cloud.google.com/&authuser=0",
cookies=cookies, headers=config.headers)
body = bs(req.text, 'html.parser')
params = [x.attrs["name"] for x in body.find_all("input", {"type":"hidden"})]
for param in wanted:
if param not in params:
return False
return True
def getting_cookies(cookies):
choices = ("You can facilitate configuring GHunt by using the GHunt Companion extension on Firefox, Chrome, Edge and Opera here :\n"
"=> https://github.com/mxrch/ghunt_companion\n\n"
"[1] (Companion) Put GHunt on listening mode (currently not compatible with docker)\n"
"[2] (Companion) Paste base64-encoded cookies\n"
"[3] Enter manually all cookies\n\n"
"Choice => ")
choice = input(choices)
if choice not in ["1","2","3"]:
exit("Please choose a valid choice. Exiting...")
if choice == "1":
received_cookies = listener.run()
cookies = json.loads(base64.b64decode(received_cookies))
elif choice == "2":
received_cookies = input("Paste the cookies here => ")
cookies = json.loads(base64.b64decode(received_cookies))
elif choice == "3":
for name in cookies.keys():
if not cookies[name]:
cookies[name] = input(f"{name} => ").strip().strip('\"')
return cookies
def get_driver(cookies = {}):
driverpath = get_driverpath()
chrome_options = get_chrome_options_args(config.headless)
options = {
'connection_timeout': None # Never timeout, otherwise it floods errors
}
tmprinter.out("Starting browser...")
driver = webdriver.Chrome(
executable_path=driverpath, seleniumwire_options=options,
options=chrome_options
)
driver.header_overrides = config.headers
driver.get('https://www.google.com/robots.txt')
for k, v in cookies.items():
driver.add_cookie({'name': k, 'value': v, 'domain': '.google.com'})
return driver
if __name__ == '__main__':
driver = None
cookies_from_file = get_saved_cookies()
tmprinter = TMPrinter()
cookies = {"SID": "", "SSID": "", "APISID": "", "SAPISID": "", "HSID": "", "LSID": "", "__Secure-3PSID": "", "CONSENT": config.default_consent_cookie, "PREF": config.default_pref_cookie}
new_cookies_entered = False
if not cookies_from_file:
cookies = getting_cookies(cookies)
new_cookies_entered = True
else:
# in case user wants to enter new cookies (example: for new account)
driver = get_driver(cookies_from_file)
html = get_authorization_source(driver)
valid_cookies = check_cookies(cookies_from_file)
valid = False
if html and valid_cookies:
print("[+] The cookies seems valid !")
valid = True
else:
print("[-] Seems like the cookies are invalid.")
new_gen_inp = input("\nDo you want to enter new browser cookies from accounts.google.com ? (Y/n) ").lower()
if new_gen_inp in ["y", ""]:
cookies = getting_cookies(cookies)
new_cookies_entered = True
elif not valid:
exit("Please put valid cookies. Exiting...")
if not driver:
driver = get_driver(cookies)
# Validate cookies
if new_cookies_entered or not cookies_from_file:
html = get_authorization_source(driver)
if html:
print("\n[+] The cookies seems valid !")
else:
exit("\n[-] Seems like the cookies are invalid, try regenerating them.")
if not new_cookies_entered:
cookies = cookies_from_file
choice = input("Do you want to generate new tokens ? (Y/n) ").lower()
if choice not in ["y", ""]:
exit()
# Start the extraction process
print("Extracting the tokens...\n")
# Extracting Google Docs token
trigger = '\"token\":\"'
if trigger not in html:
exit("[-] I can't find the Google Docs token in the source code...\n")
else:
gdoc_token = html.split(trigger)[1][:100].split('"')[0]
print("Google Docs Token => {}".format(gdoc_token))
print("Generating OSID for the Cloud Console...")
osid = gen_osid(cookies, "console.cloud.google.com", "cloudconsole")
cookies_with_osid = deepcopy(cookies)
cookies_with_osid["OSID"] = osid
# Extracting Internal People API tokens
people_key, people_auth = get_people_tokens(cookies_with_osid)
print(f"People API Key => {people_key}")
print(f"People API Auth => {people_auth}")
# Extracting Chat tokens
chat_key, chat_auth = get_chat_tokens(cookies_with_osid)
print(f"Chat Key => {chat_key}")
print(f"Chat Auth => {chat_auth}")
cac_key = get_clientauthconfig_key(cookies_with_osid)
print(f"Client Auth Config API Key => {cac_key}")
save_tokens(gdoc_token, chat_key, chat_auth, people_key, people_auth, cac_key, cookies, osid)
View File
@@ -1,30 +0,0 @@
regexs = {
"albums": r'href=\"\.\/albumarchive\/\d*?\/album\/(.*?)\" jsaction.*?>(?:<.*?>){5}(.*?)<\/div><.*?>(\d*?) ',
"photos": r'\],\"(https:\/\/lh\d\.googleusercontent\.com\/.*?)\",\[\"\d{21}\"(?:.*?,){16}\"(.*?)\"',
"review_loc_by_id": r'{}\",.*?\[\[null,null,(.*?),(.*?)\]',
"gplus": r"plus\.google\.com\/\d*\""
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
'Connection': 'Keep-Alive'
}
headless = True # if True, it doesn't show the browser while scraping GMaps reviews
ytb_hunt_always = True # if True, search for the Youtube channel every time
gmaps_radius = 30 # in km. The radius used to group gmaps reviews.
gdocs_public_doc = "1jaEEHZL32t1RUN5WuZEnFpqiEPf_APYKrRBG9LhLdvE" # The public Google Doc used as an endpoint for Google's search.
data_path = "resources/data.txt"
browser_waiting_timeout = 120
# Profile pictures options
write_profile_pic = True
profile_pics_dir = "profile_pics"
# Cookies
# if True, it will use the Google Account cookies to request the services,
# and will be able to read your personal information
gmaps_cookies = False
calendar_cookies = False
default_consent_cookie = "YES+FR.fr+V10+BX"
default_pref_cookie = "tz=Europe.Paris&f6=40000000&hl=en" # To set the lang settings to english
View File
@@ -1,4 +0,0 @@
from webdriver_manager.chrome import ChromeDriverManager
ChromeDriverManager(path="/usr/src/app").install()
print('ChromeDriver download was successful.')
View File
@@ -1,2 +0,0 @@
#!/bin/bash
docker run -v ghunt-resources:/usr/src/app/resources -ti ghcr.io/mxrch/ghunt check_and_gen.py
View File
@@ -1,2 +0,0 @@
#!/bin/bash
docker run -v ghunt-resources:/usr/src/app/resources -ti ghcr.io/mxrch/ghunt ghunt.py $1 $2
View File
@@ -0,0 +1,20 @@
import httpx
import trio
import sys
from ghunt.helpers.gmail import is_email_registered
async def main():
if not sys.argv[1:]:
exit("Please give an email address.")
as_client = httpx.AsyncClient() # Async Client
email = sys.argv[1]
is_registered = await is_email_registered(as_client, email)
print("Registered on Google :", is_registered)
trio.run(main) # running our async code from a non-async context
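# Hypothetical invocation (the example file's real name isn't shown in this diff):
#   python3 example.py larry@google.com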
View File
@@ -0,0 +1,38 @@
import httpx
import trio
import sys
from ghunt.apis.peoplepa import PeoplePaHttp
from ghunt.objects.base import GHuntCreds
async def main():
if not sys.argv[1:]:
exit("Please give an email address.")
email = sys.argv[1]
ghunt_creds = GHuntCreds()
ghunt_creds.load_creds() # Check creds (but it doesn't crash if they are invalid)
as_client = httpx.AsyncClient() # Async client
people_api = PeoplePaHttp(ghunt_creds)
found, person = await people_api.people_lookup(as_client, email, params_template="just_name")
# You can have multiple "params_template" for the GHunt APIs,
# for example, on this endpoint, you have "just_gaia_id" by default,
# "just_name" or "max_details" which is used in the email CLI module.
print("Found :", found)
if found:
if "PROFILE" in person.names: # A specification of People API, there are different containers
# A target may not exists globally, but only in your contacts,
# so it will show you only the CONTACT container,
# with the informations you submitted.
# What we want here is the PROFILE container, with public infos.
print("Name :", person.names["PROFILE"].fullname)
else:
print("Not existing globally.")
trio.run(main) # running our async code from a non-async context
View File
@@ -1,44 +0,0 @@
#!/usr/bin/env python3
from lib import modwall; modwall.check() # We check the requirements
import sys
import os
from pathlib import Path
from lib import modwall
from lib.utils import *
from modules.doc import doc_hunt
from modules.email import email_hunt
from modules.gaia import gaia_hunt
from modules.youtube import youtube_hunt
if __name__ == "__main__":
# We change the current working directory to allow using GHunt from anywhere
os.chdir(Path(__file__).parents[0])
modules = ["email", "doc", "gaia", "youtube"]
if len(sys.argv) <= 1 or sys.argv[1].lower() not in modules:
print("Please choose a module.\n")
print("Available modules :")
for module in modules:
print(f"- {module}")
exit()
module = sys.argv[1].lower()
if len(sys.argv) >= 3:
data = sys.argv[2]
else:
data = None
if module == "email":
email_hunt(data)
elif module == "doc":
doc_hunt(data)
elif module == "gaia":
gaia_hunt(data)
elif module == "youtube":
youtube_hunt(data)
1
ghunt/__init__.py Normal file
View File
@@ -0,0 +1 @@
from ghunt import globals as gb; gb.init_globals()
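# Importing the ghunt package initializes the project-wide globals
# (config, tmprinter and the rich console); see ghunt/globals.py below.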
103
ghunt/apis/calendar.py Normal file
View File
@@ -0,0 +1,103 @@
from ghunt.objects.base import GHuntCreds
from ghunt.errors import *
import ghunt.globals as gb
from ghunt.objects.apis import GAPI
from ghunt.parsers.calendar import Calendar, CalendarEvents
import httpx
from typing import *
import inspect
import json
from datetime import datetime, timezone
class CalendarHttp(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
if not headers:
headers = gb.config.headers
base_headers = {}
headers = {**headers, **base_headers}
self.hostname = "clients6.google.com"
self.scheme = "https"
self.authentication_mode = "sapisidhash" # sapisidhash, cookies_only, oauth or None
self.require_key = "calendar" # key name, or None
self._load_api(creds, headers)
async def get_calendar(self, as_client: httpx.AsyncClient, calendar_id: str) -> Tuple[bool, Calendar]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/calendar/v3/calendars/{calendar_id}"
data_type = None # json, data or None
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, None, None, data_type)
# Parsing
data = json.loads(req.text)
calendar = Calendar()
if "error" in data:
return False, calendar
calendar._scrape(data)
return True, calendar
async def get_events(self, as_client: httpx.AsyncClient, calendar_id: str, params_template="next_events",
time_min=datetime.today().replace(tzinfo=timezone.utc).isoformat(), max_results=250, page_token="") -> Tuple[bool, CalendarEvents]:
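# Caveat: the time_min default above is evaluated once, at import time, so
# "today" is frozen at process start; long-running callers should pass it explicitly.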
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/calendar/v3/calendars/{calendar_id}/events"
data_type = None # json, data or None
params_templates = {
"next_events": {
"calendarId": calendar_id,
"singleEvents": True,
"maxAttendees": 1,
"maxResults": max_results,
"timeMin": time_min # ISO Format
},
"from_beginning": {
"calendarId": calendar_id,
"singleEvents": True,
"maxAttendees": 1,
"maxResults": max_results
},
"max_from_beginning": {
"calendarId": calendar_id,
"singleEvents": True,
"maxAttendees": 1,
"maxResults": 2500 # Max
}
}
if not params_templates.get(params_template):
raise GHuntParamsTemplateError(f"The asked template {params_template} for the endpoint {endpoint_name} wasn't recognized by GHunt.")
params = params_templates[params_template]
if page_token:
params["pageToken"] = page_token
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, None, data_type)
# Parsing
data = json.loads(req.text)
events = CalendarEvents()
if not data:
return False, events
events._scrape(data)
return True, events
View File
@@ -0,0 +1,57 @@
from ghunt.objects.base import GHuntCreds
from ghunt.errors import *
import ghunt.globals as gb
from ghunt.objects.apis import GAPI
from ghunt.parsers.clientauthconfig import CacBrand
import httpx
from typing import *
import inspect
import json
class ClientAuthConfigHttp(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
if not headers:
headers = gb.config.headers
base_headers = {}
headers = {**headers, **base_headers}
self.hostname = "clientauthconfig.googleapis.com"
self.scheme = "https"
self.authentication_mode = None # sapisidhash, cookies_only, oauth or None
self.require_key = "pantheon" # key name, or None
self._load_api(creds, headers)
async def get_brand(self, as_client: httpx.AsyncClient, project_number: int) -> Tuple[bool, CacBrand]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/v1/brands/lookupkey/brand/{project_number}"
data_type = None # json, data or None
params = {
"readMask": "*",
"$outputDefaults": True
}
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, None, data_type)
# Parsing
data = json.loads(req.text)
brand = CacBrand()
if "error" in data:
return False, brand
brand._scrape(data)
return True, brand
125
ghunt/apis/drive.py Normal file
View File
@@ -0,0 +1,125 @@
from ghunt.objects.base import GHuntCreds
from ghunt.errors import *
import ghunt.globals as gb
from ghunt.objects.apis import GAPI
from ghunt.parsers.drive import DriveCommentList, DriveFile, DriveChildList
from ghunt.knowledge import drive as drive_knowledge
import httpx
from typing import *
import inspect
import json
class DriveHttp(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
if not headers:
headers = gb.config.headers
base_headers = {}
headers = {**headers, **base_headers}
# Android OAuth fields
self.api_name = "drive"
self.package_name = "com.google.android.apps.docs"
self.scopes = [
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/drive.file"
]
self.hostname = "www.googleapis.com"
self.scheme = "https"
self.authentication_mode = "oauth" # sapisidhash, cookies_only, oauth or None
self.require_key = None # key name, or None
self._load_api(creds, headers)
async def get_file(self, as_client: httpx.AsyncClient, file_id: str) -> Tuple[bool, DriveFile]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/drive/v2internal/files/{file_id}"
data_type = None # json, data or None
params = {
"fields": ','.join(drive_knowledge.request_fields),
"supportsAllDrives": True
}
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, None, data_type)
# Parsing
data = json.loads(req.text)
drive_file = DriveFile()
if "error" in data:
return False, drive_file
drive_file._scrape(data)
return True, drive_file
async def get_comments(self, as_client: httpx.AsyncClient, file_id: str, page_token: str="") -> Tuple[bool, str, DriveCommentList]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/drive/v2internal/files/{file_id}/comments"
data_type = None # json, data or None
params = {
"supportsAllDrives": True,
"maxResults": 100
}
if page_token:
params["pageToken"] = page_token
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, None, data_type)
# Parsing
data = json.loads(req.text)
drive_comments = DriveCommentList()
if "error" in data:
return False, "", drive_comments
next_page_token = data.get("nextPageToken", "")
drive_comments._scrape(data)
return True, next_page_token, drive_comments
async def get_childs(self, as_client: httpx.AsyncClient, file_id: str, page_token: str="") -> Tuple[bool, str, DriveChildList]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/drive/v2internal/files/{file_id}/children"
data_type = None # json, data or None
params = {
"supportsAllDrives": True,
"maxResults": 1000
}
if page_token:
params["pageToken"] = page_token
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, None, data_type)
# Parsing
data = json.loads(req.text)
drive_childs = DriveChildList()
if "error" in data:
return False, "", drive_childs
next_page_token = data.get("nextPageToken", "")
drive_childs._scrape(data)
return True, next_page_token, drive_childs
171
ghunt/apis/peoplepa.py Normal file
View File
@@ -0,0 +1,171 @@
from ghunt.objects.base import GHuntCreds
from ghunt.errors import *
import ghunt.globals as gb
from ghunt.objects.apis import GAPI
from ghunt.parsers.people import Person
import httpx
from typing import *
import inspect
import json
class PeoplePaHttp(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
if not headers:
headers = gb.config.headers
base_headers = {}
headers = {**headers, **base_headers}
self.hostname = "people-pa.clients6.google.com"
self.scheme = "https"
self.authentication_mode = "sapisidhash" # sapisidhash, cookies_only, oauth or None
self.require_key = "photos" # key name, or None
self._load_api(creds, headers)
async def people_lookup(self, as_client: httpx.AsyncClient, email: str, params_template="just_gaia_id") -> Tuple[bool, Person]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = "/v2/people/lookup"
data_type = None # json, data or None
params_templates = {
"just_gaia_id": {
"id": email,
"type": "EMAIL",
"matchType": "EXACT",
"requestMask.includeField.paths": "person.metadata"
},
"just_name": {
"id": email,
"type": "EMAIL",
"matchType": "EXACT",
"requestMask.includeField.paths": "person.name",
"core_id_params.enable_private_names": True
},
"max_details": {
"id": email,
"type": "EMAIL",
"match_type": "EXACT",
"extension_set.extension_names": [
"DYNAMITE_ADDITIONAL_DATA",
"DYNAMITE_ORGANIZATION_INFO",
"GPLUS_ADDITIONAL_DATA"
],
"request_mask.include_field.paths": [
"person.metadata.best_display_name",
"person.photo",
"person.cover_photo",
"person.interaction_settings",
"person.legacy_fields",
"person.metadata",
"person.in_app_reachability",
"person.name",
"person.read_only_profile_info",
"person.sort_keys",
"person.email"
],
"request_mask.include_container": [
"AFFINITY",
"PROFILE",
"DOMAIN_PROFILE",
"ACCOUNT",
"EXTERNAL_ACCOUNT",
"CIRCLE",
"DOMAIN_CONTACT",
"DEVICE_CONTACT",
"GOOGLE_GROUP",
"CONTACT"
],
"core_id_params.enable_private_names": True
}
}
if not params_templates.get(params_template):
raise GHuntParamsTemplateError(f"The asked template {params_template} for the endpoint {endpoint_name} wasn't recognized by GHunt.")
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params_templates[params_template], None, data_type)
# Parsing
data = json.loads(req.text)
person = Person()
if not data:
return False, person
person_data = list(data["people"].values())[0]
await person._scrape(as_client, person_data)
return True, person
async def people(self, as_client: httpx.AsyncClient, gaia_id: str, params_template="just_name") -> Tuple[bool, Person]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = "/v2/people"
data_type = None # json, data or None
params_templates = {
"just_name": {
"person_id": gaia_id,
"requestMask.includeField.paths": "person.name",
"core_id_params.enable_private_names": True
},
"max_details": {
"person_id": gaia_id,
"extension_set.extension_names": [
"DYNAMITE_ADDITIONAL_DATA",
"DYNAMITE_ORGANIZATION_INFO",
"GPLUS_ADDITIONAL_DATA"
],
"request_mask.include_field.paths": [
"person.metadata.best_display_name",
"person.photo",
"person.cover_photo",
"person.interaction_settings",
"person.legacy_fields",
"person.metadata",
"person.in_app_reachability",
"person.name",
"person.read_only_profile_info",
"person.sort_keys",
"person.email"
],
"request_mask.include_container": [
"AFFINITY",
"PROFILE",
"DOMAIN_PROFILE",
"ACCOUNT",
"EXTERNAL_ACCOUNT",
"CIRCLE",
"DOMAIN_CONTACT",
"DEVICE_CONTACT",
"GOOGLE_GROUP",
"CONTACT"
],
"core_id_params.enable_private_names": True
}
}
if not params_templates.get(params_template):
raise GHuntParamsTemplateError(f"The asked template {params_template} for the endpoint {endpoint_name} wasn't recognized by GHunt.")
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params_templates[params_template], None, data_type)
# Parsing
data = json.loads(req.text)
person = Person()
if data["personResponse"][0]["status"] == "NOT_FOUND":
return False, person
person_data = data["personResponse"][0]["person"]
await person._scrape(as_client, person_data)
return True, person
125
ghunt/apis/playgames.py Normal file
View File
@@ -0,0 +1,125 @@
from ghunt.objects.base import GHuntCreds
from ghunt.errors import *
import ghunt.globals as gb
from ghunt.objects.apis import GAPI
from ghunt.parsers.playgames import PlayedGames, PlayerAchievements, PlayerProfile
import httpx
from typing import *
import inspect
import json
class PlayGames(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
if not headers:
headers = gb.config.headers
base_headers = {}
headers = {**headers, **base_headers}
# Android OAuth fields
self.api_name = "playgames"
self.package_name = "com.google.android.play.games"
self.scopes = [
"https://www.googleapis.com/auth/games.firstparty",
"https://www.googleapis.com/auth/googleplay"
]
self.hostname = "www.googleapis.com"
self.scheme = "https"
self.authentication_mode = "oauth" # sapisidhash, cookies_only, oauth or None
self.require_key = None # key name, or None
self._load_api(creds, headers)
async def get_profile(self, as_client: httpx.AsyncClient, player_id: str) -> Tuple[bool, PlayerProfile]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/games/v1whitelisted/players/{player_id}"
data_type = None # json, data or None
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, None, None, data_type)
# Parsing
data = json.loads(req.text)
player_profile = PlayerProfile()
if not "displayPlayer" in data:
return False, player_profile
player_profile._scrape(data["displayPlayer"])
player_profile.id = player_id
return True, player_profile
async def get_played_games(self, as_client: httpx.AsyncClient, player_id: str, page_token: str="") -> Tuple[bool, str, PlayedGames]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "GET"
base_url = f"/games/v1whitelisted/players/{player_id}/applications/played"
data_type = None # json, data or None
params = {}
if page_token:
params = {"pageToken": page_token}
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, None, data_type)
# Parsing
data = json.loads(req.text)
played_games = PlayedGames()
if not "items" in data:
print(req)
print(req.text)
return False, "", played_games
next_page_token = data.get("nextPageToken", "")
played_games._scrape(data["items"])
return True, next_page_token, played_games
async def get_achievements(self, as_client: httpx.AsyncClient, player_id: str, page_token: str="") -> Tuple[bool, str, PlayerAchievements]:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "POST"
base_url = f"/games/v1whitelisted/players/{player_id}/achievements"
data_type = "json" # json, data or None
params = {
"state": "UNLOCKED",
"returnDefinitions": True,
"sortOrder": "RECENT_FIRST"
}
data = {}
if page_token:
params["pageToken"] = page_token
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, params, data, data_type)
# Parsing
data = json.loads(req.text)
achievements = PlayerAchievements()
if not "items" in data:
print(req)
print(req.text)
return False, "", achievements
next_page_token = ""
if "nextPageToken" in data:
next_page_token = data["nextPageToken"]
achievements._scrape(data)
return True, next_page_token, achievements
118
ghunt/apis/playgateway.py Normal file
View File
@@ -0,0 +1,118 @@
from ghunt.objects.apis import GAPI
from ghunt.objects.base import GHuntCreds
from ghunt import globals as gb
from ghunt.protos.playgatewaypa.search_player_pb2 import PlayerSearchProto
from ghunt.protos.playgatewaypa.search_player_results_pb2 import PlayerSearchResultsProto
from ghunt.protos.playgatewaypa.get_player_pb2 import GetPlayerProto
from ghunt.protos.playgatewaypa.get_player_response_pb2 import GetPlayerResponseProto
from ghunt.parsers.playgateway import PlayerSearchResults
from ghunt.parsers.playgateway import PlayerProfile
import httpx
from typing import *
from struct import pack
import inspect
class PlayGatewayPaGrpc(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
# Android OAuth fields
self.api_name = "playgames"
self.package_name = "com.google.android.play.games"
self.scopes = [
"https://www.googleapis.com/auth/games.firstparty",
"https://www.googleapis.com/auth/googleplay"
]
if not headers:
headers = gb.config.android_headers
headers["User-Agent"] = headers["User-Agent"].format(self.package_name)
headers = {**headers, **{
"Content-Type": "application/grpc",
"Te": "trailers"
}}
# Normal fields
self.hostname = "playgateway-pa.googleapis.com"
self.scheme = "https"
self.authentication_mode = "oauth" # sapisidhash, cookies_only, oauth or None
self.require_key = None # key name, or None
self._load_api(creds, headers)
async def search_player(self, as_client: httpx.AsyncClient, query: str) -> PlayerSearchResults:
endpoint_name = inspect.currentframe().f_code.co_name
verb = "POST"
base_url = "/play.gateway.adapter.interplay.v1.PlayGatewayInterplayService/GetPage"
data_type = "data"
ext_metadata = {
"bin": {
"158709649": "CggaBgj22K2aARo4EgoI+aKnlZf996E/GhcQHhoPUkQyQS4yMTEwMDEuMDAyIgIxMToICgZJZ0pHVWdCB1BpeGVsIDU",
"173715354": "CgEx"
}
}
player_search = PlayerSearchProto()
player_search.search_form.query.text = query
payload = player_search.SerializeToString()
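# Standard gRPC message framing: a 1-byte compression flag (0 = uncompressed)
# followed by the payload length as a 4-byte big-endian integer.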
prefix = bytes(1) + pack(">i", len(payload))
data = prefix + payload
self._load_endpoint(endpoint_name, {}, ext_metadata)
req = await self._query(as_client, verb, endpoint_name, base_url, None, data, data_type)
# Parsing
player_search_results = PlayerSearchResultsProto()
player_search_results.ParseFromString(req.content[5:])
parser = PlayerSearchResults()
parser._scrape(player_search_results)
return parser
async def get_player_stats(self, as_client: httpx.AsyncClient, player_id: str) -> PlayerProfile:
"""
This endpoint client isn't finished; it is only used to get the total counts of played applications & achievements.
To get all the details about a player, please use the get_profile method of PlayGames (HTTP API).
"""
endpoint_name = inspect.currentframe().f_code.co_name
verb = "POST"
base_url = "/play.gateway.adapter.interplay.v1.PlayGatewayInterplayService/GetPage"
data_type = "data"
ext_metadata = {
"bin": {
"158709649": "CggaBgj22K2aARo4EgoI+aKnlZf996E/GhcQHhoPUkQyQS4yMTEwMDEuMDAyIgIxMToICgZJZ0pHVWdCB1BpeGVsIDU",
"173715354": "CgEx"
}
}
player_profile = GetPlayerProto()
player_profile.form.query.id = player_id
payload = player_profile.SerializeToString()
prefix = bytes(1) + pack(">i", len(payload))
data = prefix + payload
self._load_endpoint(endpoint_name, {}, ext_metadata)
req = await self._query(as_client, verb, endpoint_name, base_url, None, data, data_type)
# Parsing
player_profile = GetPlayerResponseProto()
player_profile.ParseFromString(req.content[5:])
parser = PlayerProfile()
parser._scrape(player_profile)
return parser
98
ghunt/apis/vision.py Normal file
View File
@@ -0,0 +1,98 @@
from ghunt.objects.base import GHuntCreds
from ghunt.errors import *
import ghunt.globals as gb
from ghunt.objects.apis import GAPI
from ghunt.parsers.vision import VisionFaceDetection
import httpx
from typing import *
import inspect
import json
class VisionHttp(GAPI):
def __init__(self, creds: GHuntCreds, headers: Dict[str, str] = {}):
super().__init__()
if not headers:
headers = gb.config.headers
base_headers = {
"X-Origin": "https://explorer.apis.google.com"
}
headers = {**headers, **base_headers}
self.hostname = "content-vision.googleapis.com"
self.scheme = "https"
self.authentication_mode = None # sapisidhash, cookies_only, oauth or None
self.require_key = "apis_explorer" # key name, or None
self.key_origin = "https://content-vision.googleapis.com"
self._load_api(creds, headers)
async def detect_faces(self, as_client: httpx.AsyncClient, image_url: str = "", image_content: str = "",
params_template="default") -> Tuple[bool, bool, VisionFaceDetection]:
endpoint_name = inspect.currentframe().f_code.co_name
# image_url can cause errors with vision_api, so we prefer using image_content
# See => https://cloud.google.com/vision/docs/detecting-faces?#detect_faces_in_a_remote_image
verb = "POST"
base_url = "/v1/images:annotate"
data_type = "json" # json, data or None
params_templates = {
"default": {
"requests":[
{
"features": [
{
"maxResults":100,
"type":"FACE_DETECTION"
}
],
"image": {}
}
]
}
}
if not params_templates.get(params_template):
raise GHuntParamsTemplateError(f"The asked template {params_template} for the endpoint {endpoint_name} wasn't recognized by GHunt.")
# Inputs checks
if image_url and image_content:
raise GHuntParamsInputError("[Vision API faces detection] image_url and image_content can't be both put at the same time.")
elif not image_url and not image_content:
raise GHuntParamsInputError("[Vision API faces detection] Please choose at least one parameter between image_url and image_content.")
if image_url:
params_templates["default"]["requests"][0]["image"] = {
"source": {
"imageUri": image_url
}
}
elif image_content:
params_templates["default"]["requests"][0]["image"] = {
"content": image_content
}
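# Per the Cloud Vision API, image "content" is expected to be base64-encoded image bytes.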
self._load_endpoint(endpoint_name)
req = await self._query(as_client, verb, endpoint_name, base_url, None, params_templates[params_template], data_type)
rate_limited = req.status_code == 429 # The API Explorer sometimes rate-limits because its DefaultRequestsPerMinutePerProject is set to 1800
vision_face_detection = VisionFaceDetection()
if rate_limited:
return rate_limited, False, vision_face_detection
# Parsing
data = json.loads(req.text)
if not data["responses"][0]:
return rate_limited, False, vision_face_detection
vision_data = data["responses"][0]
vision_face_detection._scrape(vision_data)
return rate_limited, True, vision_face_detection
47
ghunt/cli.py Normal file
View File
@@ -0,0 +1,47 @@
import argparse
from typing import *
import sys
def parse_and_run():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="module")
### Login module
parser_login = subparsers.add_parser('login', help="Authenticate GHunt to Google.")
parser_login.add_argument('--clean', action='store_true', help="Clear credentials local file.")
### Email module
parser_email = subparsers.add_parser('email', help="Get information on an email address.")
parser_email.add_argument("email_address")
parser_email.add_argument('--json', type=str, help="File to write the JSON output to.")
### Gaia module
parser_gaia = subparsers.add_parser('gaia', help="Get information on a Gaia ID.")
parser_gaia.add_argument("gaia_id")
parser_gaia.add_argument('--json', type=str, help="File to write the JSON output to.")
### Drive module
parser_drive = subparsers.add_parser('drive', help="Get information on a Drive file or folder.")
parser_drive.add_argument("file_id", help="Example: 1N__vVu4c9fCt4EHxfthUNzVOs_tp8l6tHcMBnpOZv_M")
parser_drive.add_argument('--json', type=str, help="File to write the JSON output to.")
### Parsing
args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
process_args(args)
def process_args(args: argparse.Namespace):
import trio
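# Each module is imported lazily, only when its subcommand is selected,
# which keeps a plain `ghunt --help` fast.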
match args.module:
case "login":
from ghunt.modules import login
trio.run(login.check_and_login, None, args.clean)
case "email":
from ghunt.modules import email
trio.run(email.hunt, None, args.email_address, args.json)
case "gaia":
from ghunt.modules import gaia
trio.run(gaia.hunt, None, args.gaia_id, args.json)
case "drive":
from ghunt.modules import drive
trio.run(drive.hunt, None, args.file_id, args.json)
29
ghunt/config.py Normal file
View File
@@ -0,0 +1,29 @@
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
'Connection': 'Keep-Alive'
}
android_headers = {
'User-Agent': '{}/323710070 (Linux; U; Android 11; fr_FR; Pixel 5; Build/RD2A.211001.002; Cronet/97.0.4692.70) grpc-java-cronet/1.44.0-SNAPSHOT', # android package name
'Connection': 'Keep-Alive'
}
templates = {
"gmaps_pb":{
"stats": "!1s{}!2m3!1sYE3rYc2rEsqOlwSHx534DA!7e81!15i14416!6m2!4b1!7b1!9m0!16m4!1i100!4b1!5b1!6BQ0FFU0JrVm5TVWxEenc9PQ!17m28!1m6!1m2!1i0!2i0!2m2!1i458!2i736!1m6!1m2!1i1868!2i0!2m2!1i1918!2i736!1m6!1m2!1i0!2i0!2m2!1i1918!2i20!1m6!1m2!1i0!2i716!2m2!1i1918!2i736!18m12!1m3!1d806313.5865720833!2d150.19484835!3d-34.53825215!2m3!1f0!2f0!3f0!3m2!1i1918!2i736!4f13.1",
"reviews": {
"first": "!1s{}!2m5!1soViSYcvVG6iJytMPk6amiA8%3A1!2zMWk6NCx0OjE0MzIzLGU6MCxwOm9WaVNZY3ZWRzZpSnl0TVBrNmFtaUE4OjE!4m1!2i14323!7e81!6m2!4b1!7b1!9m0!10m6!1b1!2b1!5b1!8b1!9m1!1e3!14m69!1m57!1m4!1m3!1e3!1e2!1e4!3m5!2m4!3m3!1m2!1i260!2i365!4m1!3i10!10b1!11m42!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e10!2b0!3e4!2b1!4b1!2m5!1e1!1e4!1e3!1e5!1e2!3b0!4b1!5m1!1e1!7b1!16m3!1i10!4b1!5b1!17m0!18m9!1m3!1d2567.508024970022!2d-78.667885!3d35.7546725!2m0!3m2!1i537!2i609!4f13.1",
"page": "!1s{}!2m3!1sYE3rYc2rEsqOlwSHx534DA!7e81!15i14416!6m2!4b1!7b1!9m0!16m4!1i100!4b1!5b1!6B{}!17m28!1m6!1m2!1i0!2i0!2m2!1i458!2i736!1m6!1m2!1i1868!2i0!2m2!1i1918!2i736!1m6!1m2!1i0!2i0!2m2!1i1918!2i20!1m6!1m2!1i0!2i716!2m2!1i1918!2i736!18m12!1m3!1d806313.5865720833!2d150.19484835!3d-34.53825215!2m3!1f0!2f0!3f0!3m2!1i1918!2i736!4f13.1"
},
"photos": {
"first": "!1s{}!2m3!1spQUAYoPQLcOTlwT9u6-gDA!7e81!15i18404!9m0!14m69!1m57!1m4!1m3!1e3!1e2!1e4!3m5!2m4!3m3!1m2!1i260!2i365!4m1!3i10!10b1!11m42!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e10!2b0!3e4!2b1!4b1!2m5!1e1!1e4!1e3!1e5!1e2!3b1!4b1!5m1!1e1!7b1",
"page": "!1s{}!2m3!1spQUAYoPQLcOTlwT9u6-gDA!7e81!15i14415!9m0!14m68!1m58!1m4!1m3!1e3!1e2!1e4!3m5!2m4!3m3!1m2!1i260!2i365!4m2!2s{}!3i100!10b1!11m42!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e10!2b0!3e4!2b1!4b1!2m5!1e1!1e4!1e3!1e5!1e2!5m1!1e1!7b1!17m28!1m6!1m2!1i0!2i0!2m2!1i458!2i595!1m6!1m2!1i950!2i0!2m2!1i1000!2i595!1m6!1m2!1i0!2i0!2m2!1i1000!2i20!1m6!1m2!1i0!2i575!2m2!1i1000!2i595!18m12!1m3!1d1304345.2752527467!2d149.32871599857805!3d-34.496155324132545!2m3!1f0!2f0!3f0!3m2!1i1000!2i595!4f13.1"
}
}
}
gmaps_radius = 30 # in km. The radius distance to create groups of gmaps reviews.
# Cookies
default_consent_cookie = "YES+cb.20220118-08-p0.fr+FX+510"
default_pref_cookie = "tz=Europe.Paris&f6=40000000&hl=en" # To set the lang settings to english
44
ghunt/errors.py Normal file
View File
@@ -0,0 +1,44 @@
class GHuntKnowledgeError(BaseException):
pass
class GHuntCorruptedHeadersError(BaseException):
pass
class GHuntUnknownVerbError(BaseException):
pass
class GHuntUnknownRequestDataTypeError(BaseException):
pass
class GHuntInsufficientCreds(BaseException):
pass
class GHuntParamsTemplateError(BaseException):
pass
class GHuntParamsInputError(BaseException):
pass
class GHuntAPIResponseParsingError(BaseException):
pass
class GHuntObjectsMergingError(BaseException):
pass
class GHuntAndroidMasterAuthError(BaseException):
pass
class GHuntAndroidAppOAuth2Error(BaseException):
pass
class GHuntOSIDAuthError(BaseException):
pass
class GHuntCredsNotLoaded(BaseException):
pass
class GHuntNotAuthenticated(BaseException):
pass
class GHuntInvalidTarget(BaseException):
pass
15
ghunt/ghunt.py Normal file
View File
@@ -0,0 +1,15 @@
import sys
def main():
version = sys.version_info
if version < (3, 10):
print('[-] GHunt only works with Python 3.10+.')
print(f'Your current Python version : {version.major}.{version.minor}.{version.micro}')
sys.exit(1)
from ghunt.cli import parse_and_run
from ghunt.helpers.banner import show_banner
show_banner()
parse_and_run()
12
ghunt/globals.py Normal file
View File
@@ -0,0 +1,12 @@
# This file is only intended to serve global variables at a project-wide level.
def init_globals():
from ghunt.objects.utils import TMPrinter
from rich.console import Console
global config, tmprinter, rc
from ghunt import config
tmprinter = TMPrinter()
rc = Console(highlight=False) # Rich Console
159
ghunt/helpers/auth.py Normal file
View File
@@ -0,0 +1,159 @@
import json
import base64
from typing import *
from copy import deepcopy
import httpx
from bs4 import BeautifulSoup as bs
from ghunt import globals as gb
from ghunt.errors import *
from ghunt.helpers.utils import *
from ghunt.helpers import listener
from ghunt.helpers.knowledge import get_domain_of_service, get_package_sig
async def android_master_auth(as_client: httpx.AsyncClient, oauth_token: str) -> Tuple[str, List[str], str, str]:
"""
Takes an oauth_token to perform an android authentication
to get the master token and other information.
Returns the master token, connected services, account email and account full name.
"""
data = {
"Token": oauth_token,
"service": "ac2dm",
"get_accountid": 1,
"ACCESS_TOKEN": 1,
"add_account": 1,
"callerSig": "38918a453d07199354f8b19af05ec6562ced5788"
}
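# The callerSig above appears to be the well-known Google Play Services
# signature hash, the same constant used by projects like gpsoauth.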
req = await as_client.post("https://android.googleapis.com/auth", data=data)
resp = parse_oauth_flow_response(req.text)
for keyword in ["Token", "Email", "services", "firstName", "lastName"]:
if keyword not in resp:
raise GHuntAndroidMasterAuthError(f'Expected "{keyword}" in the response of the Android Master Authentication.\nThe oauth_token may be expired.')
return resp["Token"], resp["services"].split(","), resp["Email"], f'{resp["firstName"]} {resp["lastName"]}'
async def android_oauth_app(as_client: httpx.AsyncClient, master_token: str,
package_name: str, scopes: List[str]) -> Tuple[str, List[str], int]:
"""
Uses the master token to ask for an authorization token,
with specific scopes and app package name.
Returns the authorization token, granted scopes and expiry UTC timestamp.
"""
client_sig = get_package_sig(package_name)
data = {
"app": package_name,
"service": f"oauth2:{' '.join(scopes)}",
"client_sig": client_sig,
"Token": master_token
}
req = await as_client.post("https://android.googleapis.com/auth", data=data)
resp = parse_oauth_flow_response(req.text)
for keyword in ["Expiry", "grantedScopes", "Auth"]:
if keyword not in resp:
raise GHuntAndroidAppOAuth2Error(f'Expected "{keyword}" in the response of the Android App OAuth2 Authentication.\nThe master token may be revoked.')
return resp["Auth"], resp["grantedScopes"].split(" "), int(resp["Expiry"])
async def gen_osids(cookies: Dict[str, str], osids: List[str]) -> Dict[str, str]:
"""
Generate OSIDs for the given service names,
contained in the "osids" dict argument.
"""
generated_osids = {}
for service in osids:
sample_cookies = deepcopy(cookies)
domain = get_domain_of_service(service)
req = httpx.get(f"https://accounts.google.com/ServiceLogin?service={service}&osid=1&continue=https://{domain}/&followup=https://{domain}/&authuser=0",
cookies=cookies, headers=gb.config.headers)
for cookie in ["__Host-GAPS", "SIDCC", "__Secure-3PSIDCC"]:
sample_cookies[cookie] = req.cookies[cookie]
body = bs(req.text, 'html.parser')
params = {x.attrs["name"]:x.attrs["value"] for x in body.find_all("input", {"type":"hidden"})}
headers = {**gb.config.headers, **{"Content-Type": "application/x-www-form-urlencoded"}}
req = httpx.post(f"https://{domain}/accounts/SetOSID", cookies=cookies, data=params, headers=headers)
if not "OSID" in req.cookies:
raise GHuntOSIDAuthError("[-] No OSID header detected, exiting...")
generated_osids[service] = req.cookies["OSID"]
return generated_osids
def check_cookies(cookies: Dict[str, str]) -> bool:
"""Checks the validity of given cookies."""
req = httpx.get("https://docs.google.com", cookies=cookies, headers=gb.config.headers)
if req.status_code != 307:
return False
set_cookies = extract_set_cookies(req)
if any([cookie in set_cookies for cookie in cookies]):
return False
return True
def check_osids(cookies: Dict[str, str], osids: Dict[str, str]) -> bool:
"""Checks the validity of given OSIDs."""
for service in osids:
domain = get_domain_of_service(service)
cookies_with_osid = inject_osid(cookies, osids, service)
wanted = ["authuser", "continue", "osidt", "ifkv"]
req = httpx.get(f"https://accounts.google.com/ServiceLogin?service={service}&osid=1&continue=https://{domain}/&followup=https://{domain}/&authuser=0",
cookies=cookies_with_osid, headers=gb.config.headers)
body = bs(req.text, 'html.parser')
params = [x.attrs["name"] for x in body.find_all("input", {"type":"hidden"})]
if not all([param in wanted for param in params]):
return False
return True
async def check_master_token(as_client: httpx.AsyncClient, master_token: str) -> bool:
"""Checks the validity of the android master token."""
try:
await android_oauth_app(as_client, master_token, "com.google.android.play.games", ["https://www.googleapis.com/auth/games.firstparty"])
except GHuntAndroidAppOAuth2Error:
return False
return True
async def getting_cookies_dialog(cookies: Dict[str, str]) -> Tuple[Dict[str, str], str]:
"""
Launches the dialog that asks the user
how they want to generate their credentials.
"""
choices = ("You can facilitate configuring GHunt by using the GHunt Companion extension on Firefox, Chrome, Edge and Opera here :\n"
"=> https://github.com/mxrch/ghunt_companion\n\n"
"[1] (Companion) Put GHunt on listening mode (currently not compatible with docker)\n"
"[2] (Companion) Paste base64-encoded cookies\n"
"[3] Enter manually all cookies\n\n"
"Choice => ")
choice = input(choices)
if choice in ["1", "2"]:
if choice == "1":
received_data = listener.run()
elif choice == "2":
received_data = input("Paste the encoded cookies here => ")
data = json.loads(base64.b64decode(received_data))
cookies = data["cookies"]
oauth_token = data["oauth_token"]
elif choice == "3":
for name in cookies.keys():
cookies[name] = input(f"{name} => ").strip().strip('"')
oauth_token = input(f"oauth_token").strip().strip('"')
else:
exit("Please choose a valid choice. Exiting...")
return cookies, oauth_token

19
ghunt/helpers/banner.py Normal file
View File

@@ -0,0 +1,19 @@
from ghunt import globals as gb
def show_banner():
banner = """
[red] .d8888b. [/][blue]888 888[/][red] 888
[/][red]d88P Y88b [/][blue]888 888[/][red] 888
[/][yellow]888 [/][red]888 [/][blue]888 888[/][red] 888
[/][yellow]888 [/][blue]8888888888[/][green] 888 888[/][yellow] 88888b. [/][red] 888888
[/][yellow]888 [/][blue]88888 [/][blue]888 888[/][green] 888 888[/][yellow] 888 "88b[/][red] 888
[/][yellow]888 [/][blue]888 [/][blue]888 888[/][green] 888 888[/][yellow] 888 888[/][red] 888
[/][green]Y88b d88P [/][blue]888 888[/][green] Y88b 888[/][yellow] 888 888[/][red] Y88b.
[/][green] "Y8888P88 [/][blue]888 888[/][green] "Y88888[/][yellow] 888 888[/][red] "Y888[/red] v2
[bold]By: mxrch (🐦 [deep_sky_blue1]@mxrchreborn[/deep_sky_blue1])
[indian_red1]Support my work on GitHub Sponsors ! 💖[/indian_red1][/bold]
"""
gb.rc.print(banner)

85
ghunt/helpers/calendar.py Normal file
View File

@@ -0,0 +1,85 @@
from dateutil.relativedelta import relativedelta
from beautifultable import BeautifulTable
import httpx
from typing import *
from copy import deepcopy
from ghunt.parsers.calendar import Calendar, CalendarEvents
from ghunt.objects.base import GHuntCreds
from ghunt.objects.utils import TMPrinter
from ghunt.apis.calendar import CalendarHttp
async def fetch_all(ghunt_creds: GHuntCreds, as_client: httpx.AsyncClient, email_address: str) -> Tuple[bool, Calendar, CalendarEvents]:
calendar_api = CalendarHttp(ghunt_creds)
found, calendar = await calendar_api.get_calendar(as_client, email_address)
if not found:
return False, None, None
tmprinter = TMPrinter()
_, events = await calendar_api.get_events(as_client, email_address, params_template="max_from_beginning")
next_page_token = deepcopy(events.next_page_token)
while next_page_token:
tmprinter.out(f"[~] Dumped {len(events.items)} events...")
_, new_events = await calendar_api.get_events(as_client, email_address, params_template="max_from_beginning", page_token=next_page_token)
events.items += new_events.items
next_page_token = deepcopy(new_events.next_page_token)
tmprinter.clear()
return True, calendar, events
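A usage sketch of the pagination loop above, assuming GHunt credentials were generated beforehand with the login module (the email address is a placeholder) :
import httpx
import trio
from ghunt.objects.base import GHuntCreds
from ghunt.helpers.calendar import fetch_all

async def demo():
    ghunt_creds = GHuntCreds()
    ghunt_creds.load_creds()  # Requires saved credentials
    as_client = httpx.AsyncClient(http2=True)
    found, calendar, events = await fetch_all(ghunt_creds, as_client, "target@gmail.com")
    if found:
        print(f"[+] {len(events.items)} events dumped from {calendar.id}")
    await as_client.aclose()

trio.run(demo)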
def out(calendar: Calendar, events: CalendarEvents, email_address: str, display_name="", limit=5):
"""
Outputs the fetched calendar events.
If limit is 0, all events are shown.
"""
### Calendar
print(f"Calendar ID : {calendar.id}")
if calendar.summary != calendar.id:
print(f"[+] Calendar Summary : {calendar.summary}")
print(f"Calendar Timezone : {calendar.time_zone}\n")
### Events
target_events = events.items[-limit:]
if target_events:
print(f"[+] {len(events.items)} event{'s' if len(events.items) > 1 else ''} dumped ! Showing the last {len(target_events)} one{'s' if len(target_events) > 1 else ''}...\n")
table = BeautifulTable()
table.set_style(BeautifulTable.STYLE_GRID)
table.columns.header = ["Name", "Datetime (UTC)", "Duration"]
for event in target_events:
title = "/"
if event.summary:
title = event.summary
duration = "?"
if event.end.date_time and event.start.date_time:
duration = relativedelta(event.end.date_time, event.start.date_time)
if duration.days or duration.hours or duration.minutes:
duration = (f"{(str(duration.days) + ' day' + ('s' if duration.days > 1 else '')) if duration.days else ''} "
f"{(str(duration.hours) + ' hour' + ('s' if duration.hours > 1 else '')) if duration.hours else ''} "
f"{(str(duration.minutes) + ' minute' + ('s' if duration.minutes > 1 else '')) if duration.minutes else ''}").strip()
date = "?"
if event.start.date_time:
date = event.start.date_time.strftime("%Y/%m/%d %H:%M:%S")
table.rows.append([title, date, duration])
print(table)
print(f"\n🗃️ Download link :\n=> https://calendar.google.com/calendar/ical/{email_address}/public/basic.ics")
else:
print("[-] No events dumped.")
### Names
names = set()
for event in events.items:
if event.creator.email == email_address and (name := event.creator.display_name) and name != display_name:
names.add(name)
if names:
print("\n[+] Found other names used by the target :")
for name in names:
print(f"- {name}")

65
ghunt/helpers/drive.py Normal file
View File

@@ -0,0 +1,65 @@
from typing import *
from ghunt.parsers.drive import DriveComment, DriveCommentList, DriveCommentReply, DriveFile
from ghunt.objects.base import DriveExtractedUser
from ghunt.helpers.utils import oprint # TEMP
def get_users_from_file(file: DriveFile) -> List[DriveExtractedUser]:
"""
Extracts the users from the permissions of a Drive file,
and the last modifying user.
"""
users: Dict[str, DriveExtractedUser] = {}
for perms in [file.permissions, file.permissions_summary.select_permissions]:
for perm in perms:
if not perm.email_address:
continue
#oprint(perm)
user = DriveExtractedUser()
user.email_address = perm.email_address
user.gaia_id = perm.user_id
user.name = perm.name
user.role = perm.role
users[perm.email_address] = user
# Last modifying user
target_user = file.last_modifying_user
if target_user.id:
email = target_user.email_address
if not email:
email = target_user.email_address_from_account
if not email:
return list(users.values())
if email in users:
users[email].is_last_modifying_user = True
return list(users.values())
def get_comments_from_file(comments: DriveCommentList) -> List[Tuple[str, Dict[str, Any]]]:
"""
Extracts the comments and replies of a Drive file.
"""
def update_stats(authors: Dict[str, Dict[str, Any]], comment: DriveComment|DriveCommentReply):
name = comment.author.display_name
pic_url = comment.author.picture.url
key = f"{name}${pic_url}" # Two users can have the same name but (hopefully) not the same picture URL,
# so we combine both to make users unique
if key not in authors:
authors[key] = {
"name": name,
"pic_url": pic_url,
"count": 0
}
authors[key]["count"] += 1
authors: Dict[str, Dict[str, Any]] = {}
for comment in comments.items:
update_stats(authors, comment)
for reply in comment.replies:
update_stats(authors, reply)
return sorted(authors.items(), key=lambda k_v: k_v[1]['count'], reverse=True)

10
ghunt/helpers/gmail.py Normal file
View File

@@ -0,0 +1,10 @@
import httpx
async def is_email_registered(as_client: httpx.AsyncClient, email: str) -> bool:
"""
Abuses the gxlu endpoint to check whether an email address
is registered on Google (not only Gmail accounts).
"""
req = await as_client.get("https://mail.google.com/mail/gxlu", params={"email": email})
return "Set-Cookie" in req.headers

420
ghunt/helpers/gmaps.py Normal file
View File

@@ -0,0 +1,420 @@
from dateutil.relativedelta import relativedelta
from datetime import datetime
import json
from geopy import distance
from geopy.geocoders import Nominatim
from typing import *
import httpx
from alive_progress import alive_bar
from ghunt import globals as gb
from ghunt.objects.base import *
from ghunt.helpers.utils import *
from ghunt.objects.utils import *
from ghunt.helpers.knowledge import get_gmaps_type_translation
def get_datetime(datepublished: str):
"""
Gets an approximate date from a Maps review relative date.
Examples : 'last 2 days', 'an hour ago', '3 years ago'
"""
if datepublished.split()[0] in ["a", "an"]:
nb = 1
else:
if datepublished.startswith("last"):
nb = int(datepublished.split()[1])
else:
nb = int(datepublished.split()[0])
if "minute" in datepublished:
delta = relativedelta(minutes=nb)
elif "hour" in datepublished:
delta = relativedelta(hours=nb)
elif "day" in datepublished:
delta = relativedelta(days=nb)
elif "week" in datepublished:
delta = relativedelta(weeks=nb)
elif "month" in datepublished:
delta = relativedelta(months=nb)
elif "year" in datepublished:
delta = relativedelta(years=nb)
else:
delta = relativedelta()
return (datetime.today() - delta).replace(microsecond=0, second=0)
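The result is relative to the current date, so outputs vary ; a few indicative calls, assuming today is 2022/12/03 14:37 UTC :
get_datetime("an hour ago")  # -> datetime(2022, 12, 3, 13, 37)
get_datetime("last 2 days")  # -> datetime(2022, 12, 1, 14, 37)
get_datetime("3 years ago")  # -> datetime(2019, 12, 3, 14, 37)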
async def get_reviews(as_client: httpx.AsyncClient, gaia_id: str) -> Tuple[str, Dict[str, int], List[MapsReview], List[MapsPhoto]]:
"""Extracts the target's statistics, reviews and photos."""
next_page_token = ""
agg_reviews = []
agg_photos = []
stats = {}
req = await as_client.get(f"https://www.google.com/locationhistory/preview/mas?authuser=0&hl=en&gl=us&pb={gb.config.templates['gmaps_pb']['stats'].format(gaia_id)}")
data = json.loads(req.text[5:])
if not data[16][8]:
return "empty", stats, [], []
stats = {sec[6]:sec[7] for sec in data[16][8][0]}
total_reviews = stats["Reviews"] + stats["Ratings"] + stats["Photos"]
if not total_reviews:
return "empty", stats, [], []
with alive_bar(total_reviews, receipt=False) as bar:
for category in ["reviews", "photos"]:
first = True
while True:
if first:
req = await as_client.get(f"https://www.google.com/locationhistory/preview/mas?authuser=0&hl=en&gl=us&pb={gb.config.templates['gmaps_pb'][category]['first'].format(gaia_id)}")
first = False
else:
req = await as_client.get(f"https://www.google.com/locationhistory/preview/mas?authuser=0&hl=en&gl=us&pb={gb.config.templates['gmaps_pb'][category]['page'].format(gaia_id, next_page_token)}")
data = json.loads(req.text[5:])
new_reviews = []
new_photos = []
next_page_token = ""
# Reviews
if category == "reviews":
if not data[24]:
return "private", stats, [], []
reviews_data = data[24][0]
if not reviews_data:
break
for review_data in reviews_data:
review = MapsReview()
review.id = review_data[0][10]
review.approximative_date = get_datetime(review_data[0][1]) # UTC
review.comment = review_data[0][3]
review.rating = review_data[0][4]
if len(review_data[0]) >= 50 and review_data[0][49]:
guided_data = review_data[0][49]
for guided_section in guided_data:
if not guided_section[2]:
continue
guided = MapsGuidedAnswer()
guided.id = guided_section[0][0]
guided.question = guided_section[1]
guided.answer = guided_section[2][0][0][1]
review.guided_answers.append(guided)
review.location.id = review_data[1][14][0]
review.location.name = review_data[1][2]
review.location.address = review_data[1][3]
review.location.tags = review_data[1][4] if review_data[1][4] else []
review.location.types = [x for x in review_data[1][8] if x]
if review_data[1][0]:
review.location.position.latitude = review_data[1][0][2]
review.location.position.longitude = review_data[1][0][3]
if len(review_data[1]) > 31 and review_data[1][31]:
review.location.cost = len(review_data[1][31])
new_reviews.append(review)
bar()
agg_reviews += new_reviews
if not new_reviews or len(data[24]) < 4 or not data[24][3]:
break
next_page_token = data[24][3].strip("=")
# Photos
elif category == "photos" :
if not data[22]:
return "private", stats, [], []
photos_data = data[22][1]
if not photos_data:
break
for photo_data in photos_data:
photos = MapsPhoto()
photos.id = photo_data[0][10]
photos.url = photo_data[0][6][0].split("=")[0]
date = photo_data[0][21][6][8]
photos.exact_date = datetime(date[0], date[1], date[2], date[3]) # UTC
photos.approximative_date = get_datetime(date[8][0]) # UTC
if len(photo_data) > 1:
photos.location.id = photo_data[1][14][0]
photos.location.name = photo_data[1][2]
photos.location.address = photo_data[1][3]
photos.location.tags = photo_data[1][4] if photo_data[1][4] else []
photos.location.types = [x for x in photo_data[1][8] if x] if photo_data[1][8] else []
if photo_data[1][0]:
photos.location.position.latitude = photo_data[1][0][2]
photos.location.position.longitude = photo_data[1][0][3]
if len(photo_data[1]) > 31 and photo_data[1][31]:
photos.location.cost = len(photo_data[1][31])
new_photos.append(photos)
bar()
agg_photos += new_photos
if not new_photos or len(data[22]) < 4 or not data[22][3]:
break
next_page_token = data[22][3].strip("=")
return "", stats, agg_reviews, agg_photos
def avg_location(locs: List[Tuple[float, float]]) -> Tuple[float, float]:
"""
Calculates the average location
from a list of (latitude, longitude) tuples.
"""
latitude = []
longitude = []
for loc in locs:
latitude.append(loc[0])
longitude.append(loc[1])
latitude = sum(latitude) / len(latitude)
longitude = sum(longitude) / len(longitude)
return latitude, longitude
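This is a plain arithmetic mean over the coordinates, which is good enough for points that are close to each other :
# Paris (48.85, 2.35) and Lyon (45.76, 4.84) average out to central Burgundy :
avg_location([(48.85, 2.35), (45.76, 4.84)])  # -> (~47.3, ~3.6)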
def translate_confidence(percents: int) -> str:
"""Translates a percentage into more human-friendly text."""
if percents >= 100:
return "Extremely high"
elif percents >= 80:
return "Very high"
elif percents >= 60:
return "Little high"
elif percents >= 40:
return "Okay"
elif percents >= 20:
return "Low"
elif percents >= 10:
return "Very low"
else:
return "Extremely low"
def sanitize_location(location: Dict[str, str]):
"""Returns the nearest place from a Nomatim location response."""
not_country = False
not_town = False
town = "?"
country = "?"
if "city" in location:
town = location["city"]
elif "village" in location:
town = location["village"]
elif "town" in location:
town = location["town"]
elif "municipality" in location:
town = location["municipality"]
else:
not_town = True
if not "country" in location:
not_country = True
location["country"] = country
if not_country and not_town:
return False
location["town"] = town
return location
def calculate_probable_location(geolocator: Nominatim, reviews_and_photos: List[MapsReview|MapsPhoto], gmaps_radius: int):
"""Calculates the probable location from a list of reviews and the max radius."""
tmprinter = TMPrinter()
radius = gmaps_radius
locations = {}
tmprinter.out(f"Calculation of the distance of each review...")
for nb, review in enumerate(reviews_and_photos):
if not review.location.position.latitude or not review.location.position.longitude:
continue
if review.location.id not in locations:
locations[review.location.id] = {"dates": [], "locations": [], "range": None, "score": 0}
location = (review.location.position.latitude, review.location.position.longitude)
for review2 in reviews_and_photos:
location2 = (review2.location.position.latitude, review2.location.position.longitude)
dis = distance.distance(location, location2).km
if dis <= radius:
locations[review.location.id]["dates"].append(review2.approximative_date)
locations[review.location.id]["locations"].append(location2)
maxdate = max(locations[review.location.id]["dates"])
mindate = min(locations[review.location.id]["dates"])
locations[review.location.id]["range"] = maxdate - mindate
tmprinter.out(f"Calculation of the distance of each review ({nb}/{len(reviews_and_photos)})...")
tmprinter.clear()
locations = {k: v for k, v in
sorted(locations.items(), key=lambda k: len(k[1]["locations"]), reverse=True)} # We sort it
tmprinter.out("Identification of redundant areas...")
to_del = []
for id in locations:
if id in to_del:
continue
for id2 in locations:
if id2 in to_del or id == id2:
continue
if all([loc in locations[id]["locations"] for loc in locations[id2]["locations"]]):
to_del.append(id2)
for hash in to_del:
del locations[hash]
tmprinter.out("Calculating confidence...")
maxrange = max([locations[hash]["range"] for hash in locations])
maxlen = max([len(locations[hash]["locations"]) for hash in locations])
minreq = 3
mingroups = 3
score_steps = 4
for hash, loc in locations.items():
if len(loc["locations"]) == maxlen:
locations[hash]["score"] += score_steps * 4
if loc["range"] == maxrange:
locations[hash]["score"] += score_steps * 3
if len(locations) >= mingroups:
others = sum([len(locations[h]["locations"]) for h in locations if h != hash])
if len(loc["locations"]) > others:
locations[hash]["score"] += score_steps * 2
if len(loc["locations"]) >= minreq:
locations[hash]["score"] += score_steps
panels = sorted(set([loc["score"] for loc in locations.values()]), reverse=True)
maxscore = sum([p * score_steps for p in range(1, score_steps + 1)])
for panel in panels:
locs = [loc for loc in locations.values() if loc["score"] == panel]
if len(locs[0]["locations"]) == 1:
panel /= 2
if len(reviews_and_photos) < 4:
panel /= 2
confidence = translate_confidence(panel / maxscore * 100)
for nb, loc in enumerate(locs):
avg = avg_location(loc["locations"])
while True:
try:
location = geolocator.reverse(f"{avg[0]}, {avg[1]}", timeout=10).raw["address"]
break
except Exception: # Nominatim sometimes times out or rate-limits, so we retry until it answers
pass
location = sanitize_location(location)
locs[nb]["avg"] = location
del locs[nb]["locations"]
del locs[nb]["score"]
del locs[nb]["range"]
del locs[nb]["dates"]
tmprinter.clear()
return confidence, locs
def output(err: str, stats: Dict[str, int], reviews: List[MapsReview], photos: List[MapsPhoto], gaia_id: str):
"""Pretty print the Maps results, and do some guesses."""
print(f"\nProfile page : https://www.google.com/maps/contrib/{gaia_id}/reviews")
reviews_and_photos: List[MapsReview|MapsPhoto] = reviews + photos
if err != "private" and (err == "empty" or not reviews_and_photos):
print("\n[-] No review.")
return
print("\n[Statistics]")
for section, number in stats.items():
if number:
print(f"{section} : {number}")
if err == "private":
print("\n[-] Reviews are private.")
return
print("\n[Reviews]")
avg_ratings = round(sum([x.rating for x in reviews]) / len(reviews), 1)
print(f"[+] Average rating : {ppnb(avg_ratings)}/5\n")
costs_table = {
1: "Inexpensive",
2: "Moderately expensive",
3: "Expensive",
4: "Very expensive"
}
total_costs = 0
costs_stats = {x:0 for x in range(1,5)}
for review in reviews_and_photos:
if review.location.cost:
costs_stats[review.location.cost] += 1
total_costs += 1
costs_stats = dict(sorted(costs_stats.items(), key=lambda item: item[1], reverse=True)) # We sort the dict by cost popularity
if total_costs:
print("[Costs]")
for cost, desc in costs_table.items():
line = f"> {ppnb(round(costs_stats[cost]/total_costs*100, 1))}% {desc} ({costs_stats[cost]})"
style = ""
if not costs_stats[cost]:
style = "bright_black"
elif costs_stats[cost] == list(costs_stats.values())[0]:
style = "spring_green1"
gb.rc.print(line, style=style)
avg_costs = round(sum([x*y for x,y in costs_stats.items()]) / total_costs)
print(f"\n[+] Average costs : {costs_table[avg_costs]}")
else:
print("[-] No costs data.")
types = {}
for review in reviews_and_photos:
for type in review.location.types:
if type not in types:
types[type] = 0
types[type] += 1
types = dict(sorted(types.items(), key=lambda item: item[1], reverse=True))
types_and_tags = {}
for review in reviews_and_photos:
for type in review.location.types:
if type not in types_and_tags:
types_and_tags[type] = {}
for tag in review.location.tags:
if tag not in types_and_tags[type]:
types_and_tags[type][tag] = 0
types_and_tags[type][tag] += 1
types_and_tags[type] = dict(sorted(types_and_tags[type].items(), key=lambda item: item[1], reverse=True))
types_and_tags = dict(sorted(types_and_tags.items()))
if types_and_tags:
print("\nTarget's locations preferences :")
unknown_trads = []
for type, type_count in types.items():
tags_counts = types_and_tags[type]
translation = get_gmaps_type_translation(type)
if not translation:
unknown_trads.append(type)
gb.rc.print(f"\n🏨 [underline]{translation if translation else type.title()} [{type_count}]", style="bold")
nb = 0
for tag, tag_count in list(tags_counts.items()):
if nb >= 7:
break
elif tag.lower() == type:
continue
print(f"- {tag} ({tag_count})")
nb += 1
if unknown_trads:
print(f"\n⚠️ The following gmaps types haven't been found in GHunt\'s knowledge.")
for type in unknown_trads:
print(f"- {type}")
print("Please open an issue on the GHunt Github or submit a PR to add it !")
geolocator = Nominatim(user_agent="nominatim")
confidence, locations = calculate_probable_location(geolocator, reviews_and_photos, gb.config.gmaps_radius)
print(f"\n[+] Probable location (confidence => {confidence}) :")
loc_names = []
for loc in locations:
loc_names.append(
f"- {loc['avg']['town']}, {loc['avg']['country']}"
)
loc_names = set(loc_names) # delete duplicates
for loc in loc_names:
print(loc)

33
ghunt/helpers/ia.py Normal file
View File

@@ -0,0 +1,33 @@
from ghunt import globals as gb
from ghunt.apis.vision import VisionHttp
import httpx
import trio
from base64 import b64encode
async def detect_face(vision_api: VisionHttp, as_client: httpx.AsyncClient, image_url: str):
req = await as_client.get(image_url)
encoded_image = b64encode(req.content).decode()
are_faces_found = False
faces_results = None
for retry in range(5):
rate_limited, are_faces_found, faces_results = await vision_api.detect_faces(as_client, image_content=encoded_image)
if not rate_limited:
break
await trio.sleep(0.5)
else:
exit("\n[-] Vision API keeps rate-limiting.")
if are_faces_found:
if len(faces_results.face_annotations) > 1:
gb.rc.print(f"🎭 {len(faces_results.face_annotations)} faces detected !", style="italic")
else:
gb.rc.print(f"🎭 [+] Face detected !", style="italic bold")
else:
gb.rc.print(f"🎭 No face detected.", style="italic bright_black")
return faces_results

39
ghunt/helpers/knowledge.py Normal file
View File

@@ -0,0 +1,39 @@
from ghunt.knowledge.services import services_baseurls
from ghunt.knowledge.keys import keys
from ghunt.knowledge.maps import types_translations
from ghunt.knowledge.people import user_types
from ghunt.knowledge.sig import sigs
from ghunt.errors import GHuntKnowledgeError
from typing import *
def get_domain_of_service(service: str) -> str:
if service not in services_baseurls:
raise GHuntKnowledgeError(f'The service "{service}" has not been found in GHunt\'s services knowledge.')
return services_baseurls.get(service)
def get_origin_of_key(key_name: str) -> str:
if key_name not in keys:
raise GHuntKnowledgeError(f'The key "{key_name}" has not been found in GHunt\'s API keys knowledge.')
return keys.get(key_name, {}).get("origin")
def get_api_key(key_name: str) -> str:
if key_name not in keys:
raise GHuntKnowledgeError(f'The key "{key_name}" has not been found in GHunt\'s API keys knowledge.')
return keys.get(key_name, {}).get("key")
def get_gmaps_type_translation(type_name: str) -> str:
# Unlike the other getters, this one returns None for unknown types :
# the gmaps helper collects them and asks the user to open an issue.
return types_translations.get(type_name)
def get_user_type_definition(type_name: str) -> str:
if type_name not in user_types:
raise GHuntKnowledgeError(f'The user type "{type_name}" has not been found in GHunt\'s knowledge.\nPlease open an issue on the GHunt Github or submit a PR to add it !')
return user_types.get(type_name)
def get_package_sig(package_name: str) -> str:
if package_name not in sigs:
raise GHuntKnowledgeError(f'The package name "{package_name}" has not been found in GHunt\'s SIGs knowledge.')
return sigs.get(package_name)
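Apart from the gmaps translation lookup, each getter either returns the known value or fails loudly with a GHuntKnowledgeError, so callers don't have to handle missing keys themselves. For example :
from ghunt.helpers.knowledge import get_domain_of_service, get_package_sig

get_domain_of_service("cl")  # -> "calendar.google.com"
get_package_sig("com.google.android.youtube")  # -> "24bb24c05e47e0aefa68a58a766179d9b613a600"
get_domain_of_service("gmail")  # raises GHuntKnowledgeError : "gmail" isn't in the services knowledge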

ghunt/helpers/listener.py
View File

@@ -1,9 +1,10 @@
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading
from typing import *
from time import sleep
from ghunt.objects.base import SmartObj
class DataBridge():
class DataBridge(SmartObj):
def __init__(self):
self.data = None

77
ghunt/helpers/playgames.py Normal file
View File

@@ -0,0 +1,77 @@
from ghunt.objects.base import GHuntCreds
from ghunt.apis.playgames import PlayGames
from ghunt.apis.playgateway import PlayGatewayPaGrpc
from ghunt.parsers.playgames import Player, PlayerProfile
from ghunt.parsers.playgateway import PlayerSearchResult
from ghunt.objects.utils import TMPrinter
import httpx
from alive_progress import alive_bar
from typing import *
async def get_player(ghunt_creds: GHuntCreds, as_client: httpx.AsyncClient, player_id: str):
playgames = PlayGames(ghunt_creds)
tmprinter = TMPrinter()
tmprinter.out("[~] Getting player profile...")
is_found, player_profile = await playgames.get_profile(as_client, player_id)
tmprinter.clear()
if not is_found or not player_profile.profile_settings.profile_visible:
return is_found, Player()
playgateway_pa = PlayGatewayPaGrpc(ghunt_creds)
player_stats = await playgateway_pa.get_player_stats(as_client, player_id)
with alive_bar(player_stats.played_games_count, title="🚎 Fetching played games...", receipt=False) as bar:
_, next_page_token, played_games = await playgames.get_played_games(as_client, player_id)
bar(len(played_games.games))
while next_page_token:
_, next_page_token, new_played_games = await playgames.get_played_games(as_client, player_id, next_page_token)
played_games.games += new_played_games.games
bar(len(new_played_games.games))
with alive_bar(player_stats.achievements_count, title="🚎 Fetching achievements...", receipt=False) as bar:
_, next_page_token, achievements = await playgames.get_achievements(as_client, player_id)
bar(len(achievements.achievements))
while next_page_token:
_, next_page_token, new_achievements = await playgames.get_achievements(as_client, player_id, next_page_token)
achievements.achievements += new_achievements.achievements
bar(len(new_achievements.achievements))
player = Player(player_profile, played_games.games, achievements.achievements)
return is_found, player
async def search_player(ghunt_creds: GHuntCreds, as_client: httpx.AsyncClient, query: str) -> List[PlayerSearchResult]:
playgateway_pa = PlayGatewayPaGrpc(ghunt_creds)
player_search_results = await playgateway_pa.search_player(as_client, query)
return player_search_results.results
def output(player: Player):
if not player.profile.profile_settings.profile_visible:
print("\n[-] Profile is private.")
return
print("\n[+] Profile is public !")
print(f"\n[+] Played to {len(player.played_games)} games")
print(f"[+] Got {len(player.achievements)} achievements")
if player.played_games:
print(f"\n[+] Last played game : {player.profile.last_played_app.app_name} ({player.profile.last_played_app.timestamp_millis} UTC)")
if player.achievements:
app_ids_count = {}
for achievement in player.achievements:
if (app_id := achievement.app_id) not in app_ids_count:
app_ids_count[app_id] = 0
app_ids_count[app_id] += 1
app_ids_count = dict(sorted(app_ids_count.items(), key=lambda item: item[1], reverse=True))
achiv_nb = list(app_ids_count.values())[0]
target_game = None
for game in player.played_games:
if game.game_data.id == list(app_ids_count.keys())[0]:
target_game = game
break
print(f"[+] Game with the most achievements : {target_game.game_data.name} ({achiv_nb})")

124
ghunt/helpers/utils.py Normal file
View File

@@ -0,0 +1,124 @@
from pathlib import Path
from PIL import Image
import hashlib
from typing import *
from time import time
from datetime import datetime, timezone
from dateutil.parser import isoparse
from copy import deepcopy
import jsonpickle
import json
import httpx
import imagehash
from io import BytesIO
from ghunt import globals as gb
def get_httpx_client() -> httpx.AsyncClient:
"""
Returns a customized httpx AsyncClient to better support the needs of GHunt CLI users.
"""
return httpx.AsyncClient(http2=True, timeout=15)
def oprint(obj: Any) -> None:
serialized = jsonpickle.encode(obj)
pretty_output = json.dumps(json.loads(serialized), indent=2)
print(pretty_output)
def within_docker() -> bool:
return Path('/.dockerenv').is_file()
def gen_sapisidhash(sapisid: str, origin: str, timestamp: str = str(int(time()))) -> str:
return f"{timestamp}_{hashlib.sha1(' '.join([timestamp, sapisid, origin]).encode()).hexdigest()}"
def extract_set_cookies(req: httpx.Response) -> Dict[str, str]:
return {pair[0]:'='.join(pair[1:]) for x in req.headers.get_list("set-cookie") if (pair := x.split(";")[0].split("="))} # '='.join keeps any '=' inside cookie values
def inject_osid(cookies: Dict[str, str], osids: Dict[str, str], service: str) -> Dict[str, str]:
cookies_with_osid = deepcopy(cookies)
cookies_with_osid["OSID"] = osids[service]
return cookies_with_osid
def is_headers_syntax_good(headers: Dict[str, str]) -> bool:
try:
httpx.Headers(headers)
return True
except Exception:
return False
async def get_url_image_flathash(as_client: httpx.AsyncClient, image_url: str) -> str:
req = await as_client.get(image_url)
img = Image.open(BytesIO(req.content))
flathash = imagehash.average_hash(img)
return str(flathash)
async def is_default_profile_pic(as_client: httpx.AsyncClient, image_url: str) -> Tuple[bool, str]:
"""
Returns a boolean which indicates if the image_url
is a default profile picture, and the flathash of
the image.
"""
flathash = await get_url_image_flathash(as_client, image_url)
if imagehash.hex_to_flathash(flathash, 8) - imagehash.hex_to_flathash("000018183c3c0000", 8) < 10:
return True, str(flathash)
return False, str(flathash)
def get_class_name(obj) -> str:
return str(obj).strip("<>").split(" ")[0]
def get_datetime_utc(date_str: str) -> datetime:
"""Converts an ISO date string to a datetime object in UTC."""
date = isoparse(date_str)
margin = date.utcoffset()
return date.replace(tzinfo=timezone.utc) - margin
def ppnb(nb: float|int) -> float:
"""
Pretty-prints a float number.
Ex: 3.9 -> 3.9
4.0 -> 4
4.1 -> 4.1
"""
try:
return int(nb) if nb % int(nb) == 0.0 else nb
except ZeroDivisionError:
if nb == 0.0:
return 0
else:
return nb
def parse_oauth_flow_response(body: str):
"""
Parses the plaintext key=value response sent by android.googleapis.com
during the Android OAuth2 login flow.
"""
return {sp[0]:'='.join(sp[1:]) for x in body.split("\n") if (sp := x.split("="))}
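The response body is plaintext key=value lines ; note how '=' characters inside values survive the parsing :
parse_oauth_flow_response("Token=aas_et/AKpp...dGVzdA==\nEmail=target@gmail.com")
# -> {"Token": "aas_et/AKpp...dGVzdA==", "Email": "target@gmail.com"}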
def humanize_list(array: List[any]):
"""
Transforms a list to a human sentence.
Ex : ["reader", "writer", "owner"] -> "reader, writer and owner".
"""
if len(array) <= 1:
return ''.join(array)
final = ""
for nb, item in enumerate(array):
if nb == 0:
final += f"{item}"
elif nb+1 < len(array):
final += f", {item}"
else:
final += f" and {item}"
return final
def unicode_patch(txt: str) -> str:
bad_chars = {
"é": "e",
"è": "e",
"ç": "c",
"à": "a"
}
for bad_char, replacement in bad_chars.items(): # Each character must be replaced individually ; the old one-liner searched for the joined string "éèçà"
txt = txt.replace(bad_char, replacement)
return txt
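A quick check of the per-character replacement (the previous one-liner searched for the concatenated string "éèçà", which never matches anything) :
unicode_patch("Héllo ça va")  # -> "Hello ca va"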

129
ghunt/knowledge/drive.py Normal file
View File

@@ -0,0 +1,129 @@
default_file_capabilities = [
'can_block_owner',
'can_copy',
'can_download',
'can_print',
'can_read',
'can_remove_my_drive_parent'
]
default_folder_capabilities = [
'can_block_owner',
'can_download',
'can_list_children',
'can_print',
'can_read',
'can_remove_my_drive_parent'
]
request_fields = [
'copyRequiresWriterPermission',
'sourceAppId',
'authorizedAppIds',
'linkShareMetadata',
'teamDriveId',
'primaryDomainName',
'approvalMetadata',
'md5Checksum',
'resourceKey',
'quotaBytesUsed',
'hasChildFolders',
'fullFileExtension',
'isAppAuthorized',
'iconLink',
'trashingUser',
'title',
'recency',
'detectors',
'exportLinks',
'modifiedDate',
'copyable',
'description',
'mimeType',
'passivelySubscribed',
'videoMediaMetadata',
'headRevisionId',
'customerId',
'fileExtension',
'originalFilename',
'parents',
'imageMediaMetadata',
'recencyReason',
'folderColorRgb',
'createdDate',
'labels',
'abuseNoticeReason',
'webViewLink',
'driveId',
'ownedByMe',
'flaggedForAbuse',
'lastModifyingUser',
'thumbnailLink',
'capabilities',
'sharedWithMeDate',
'primarySyncParentId',
'sharingUser',
'version',
'permissionsSummary',
'actionItems',
'labelInfo',
'explicitlyTrashed',
'shared',
'subscribed',
'ancestorHasAugmentedPermissions',
'writersCanShare',
'permissions',
'alternateLink',
'hasLegacyBlobComments',
'id',
'userPermission',
'hasThumbnail',
'lastViewedByMeDate',
'fileSize',
'kind',
'thumbnailVersion',
'spaces',
'organizationDisplayName',
'abuseIsAppealable',
'trashedDate',
'folderFeatures',
'webContentLink',
'contentRestrictions',
'shortcutDetails',
'folderColor',
'hasAugmentedPermissions'
]
mime_types = {
"application/vnd.google-apps.audio": "Audio 🎧",
"application/vnd.google-apps.document": "Google Docs 📝",
"application/vnd.google-apps.drive-sdk": "3rd party shortcut ↪️",
"application/vnd.google-apps.drawing": "Google Drawing ✏️",
"application/vnd.google-apps.file": "Google Drive file 📄",
"application/vnd.google-apps.folder": "Google Drive folder 🗂️",
"application/vnd.google-apps.form": "Google Forms 👨‍🏫",
"application/vnd.google-apps.fusiontable": "Google Fusion Tables 🌶️",
"application/vnd.google-apps.jam": "Google Jamboard 🖍️",
"application/vnd.google-apps.map": "Google My Maps 📍",
"application/vnd.google-apps.photo": "Photo 📷",
"application/vnd.google-apps.presentation": "Google Slides ❇️",
"application/vnd.google-apps.script": "Google Apps Scripts 📜",
"application/vnd.google-apps.shortcut": "Shortcut ↩️",
"application/vnd.google-apps.site": "Google Sites 🌐",
"application/vnd.google-apps.spreadsheet": "Google Sheets 📟",
"application/vnd.google-apps.unknown": "Unknown ❔",
"application/vnd.google-apps.video": "Video 📼",
"application/pdf": "PDF Document 📕",
"application/msword": "Microsoft Word document 📝",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "OpenXML Word document 📝",
"application/vnd.ms-powerpoint.presentation.macroEnabled.12": "Microsoft Powerpoint with macros ❇️",
"application/vnd.ms-excel": "Microsoft Excel spreadsheet 📟",
"image/jpeg": "JPEG Image 🖼️",
"audio/mpeg": "MPEG Audio 🎧",
"video/mpeg": "MPEG Video 📼",
"application/zip": "ZIP Archive 🗃️",
"text/plain": "Plain Text 📃",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "OpenXML Spreadsheet document ❇️",
"application/vnd.android.package-archive": "Android Package 📱",
"application/vnd.google-apps.kix": "Google Apps 🈸"
}

7
ghunt/knowledge/keys.py Normal file
View File

@@ -0,0 +1,7 @@
keys = {
"pantheon": {"key": "AIzaSyCI-zsRP85UVOi0DjtiCwWBwQ1djDy741g", "origin": "https://console.cloud.google.com"},
"photos": {"key": "AIzaSyAa2odBewW-sPJu3jMORr0aNedh3YlkiQc", "origin": "https://photos.google.com"},
"apis_explorer": {"key": "AIzaSyAa8yy0GdcGPHdtD083HiGGx_S0vMPScDM", "origin": "https://explorer.apis.google.com"},
"calendar": {"key": "AIzaSyBNlYH01_9Hc5S1J9vuFmu2nUqBZJNAXxs", "origin": "https://calendar.google.com"},
"youtubei": {"key": "AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w", "origin": "https://youtube.com"}
}

35
ghunt/knowledge/maps.py Normal file
View File

@@ -0,0 +1,35 @@
types_translations = {
'airport': 'Airport',
'bar': 'Bar',
'bank_intl': 'Bank',
'bus': 'Bus',
'cafe': 'Café',
'camping': 'Camping',
'cemetery': 'Cemetery',
'civic_bldg': 'Civic building',
'ferry': 'Ferry',
'gas': 'Gas',
'generic': 'Generic',
'golf': 'Golf',
'hospital_H': 'Hospital H',
'library': 'Library',
'lodging': 'Lodging',
'monument': 'Monument',
'movie': 'Movie',
'museum': 'Museum',
'parking': 'Parking',
'police': 'Police',
'postoffice': 'Post office',
'restaurant': 'Restaurant',
'school': 'School',
'shoppingbag': 'Shopping bag',
'shoppingcart': 'Shopping cart',
'train': 'Train',
'tram': 'Tram',
'tree': 'Park',
'worship_buddhist': 'Worship Buddhist',
'worship_christian': 'Worship Christian',
'worship_hindu': 'Worship Hindu',
'worship_islam': 'Worship Islam',
'worship_jewish': 'Worship Jewish'
}

15
ghunt/knowledge/people.py Normal file
View File

@@ -0,0 +1,15 @@
# https://developers.google.com/people/api/rest/v1/people#usertype
user_types = {
"USER_TYPE_UNKNOWN": "The user type is not known.", # Official
"GOOGLE_USER": "The user is a Google user.", # Official
"GPLUS_USER": "The user is a Currents user.", # Official
"GOOGLE_APPS_USER": "The user is a Google Workspace user.", # Official
"OWNER_USER_TYPE_UNKNOWN": "The user type is not known.", # Guess
"GPLUS_DISABLED_BY_ADMIN": "This user's Currents account has been disabled by an admin.", # Guess
"GOOGLE_APPS_USER": "The user is a Google Apps user.", # Guess
"GOOGLE_FAMILY_USER": "The user is a Google Family user.", # Guess
"GOOGLE_FAMILY_CHILD_USER": "The user is a Google Family child user.", # Guess
"GOOGLE_APPS_ADMIN_DISABLED": "This admin of Google Apps has been disabled.", # Guess
"GOOGLE_ONE_USER": "The user is a Google One user.", # Guess
"GOOGLE_FAMILY_CONVERTED_CHILD_USER": "This Google Family user was converted to a child user." # Guess
}

4
ghunt/knowledge/services.py Normal file
View File

@@ -0,0 +1,4 @@
services_baseurls = {
"cloudconsole": "console.cloud.google.com",
"cl": "calendar.google.com"
}

5
ghunt/knowledge/sig.py Normal file
View File

@@ -0,0 +1,5 @@
sigs = {
"com.google.android.play.games": "38918a453d07199354f8b19af05ec6562ced5788",
"com.google.android.apps.docs": "38918a453d07199354f8b19af05ec6562ced5788",
"com.google.android.youtube": "24bb24c05e47e0aefa68a58a766179d9b613a600"
}

210
ghunt/modules/drive.py Normal file
View File

@@ -0,0 +1,210 @@
from ghunt.helpers.utils import *
from ghunt.objects.base import DriveExtractedUser, GHuntCreds
from ghunt.apis.drive import DriveHttp
from ghunt.apis.clientauthconfig import ClientAuthConfigHttp
from ghunt import globals as gb
from ghunt.helpers import auth
from ghunt.helpers.drive import get_comments_from_file, get_users_from_file
from ghunt.knowledge import drive as drive_knowledge
import httpx
import inflection
import humanize
import inspect
from typing import *
from datetime import timedelta
def show_user(user: DriveExtractedUser):
if user.name:
print(f"Full name : {user.name}")
else:
gb.rc.print("Full name : -", style="bright_black")
print(f"Email : {user.email_address}")
if user.gaia_id:
print(f"Gaia ID : {user.gaia_id}")
else:
gb.rc.print("Gaia ID : -", style="bright_black")
if user.is_last_modifying_user:
print("[+] Last user to have modified the document !")
async def hunt(as_client: httpx.AsyncClient, file_id: str, json_file: str=None):
if not as_client:
as_client = get_httpx_client()
ghunt_creds = GHuntCreds()
ghunt_creds.load_creds()
if not ghunt_creds.are_creds_loaded():
exit("[-] Creds aren't loaded. Are you logged in ?")
if not auth.check_cookies(ghunt_creds.cookies):
exit("[-] Seems like the cookies are invalid. Exiting...")
drive = DriveHttp(ghunt_creds)
file_found, file = await drive.get_file(as_client, file_id)
if not file_found:
exit("[-] The file wasn't found.")
is_folder = file.mime_type == "application/vnd.google-apps.folder"
file_type = drive_knowledge.mime_types.get(file.mime_type)
#gb.rc.print(f"[+] {'Folder' if is_folder else 'File'} found !", style="sea_green3")
gb.rc.print("\n🗃️ Drive properties\n", style="deep_pink4")
print(f"Title : {file.title}")
print(f"{'Folder' if is_folder else 'File'} ID : {file.id}")
if file.md5_checksum:
print(f"MD5 Checksum : {file.md5_checksum}")
if file.file_size:
print(f"{'Folder' if is_folder else 'File'} size : {humanize.naturalsize(file.file_size)}")
if file_type:
gb.rc.print(f"\nType : {file_type} [italic]\[{file.mime_type}][/italic]")
else:
print(f"\nMime Type : {file.mime_type}")
if is_folder:
print(f"Folder link :\n=> {file.alternate_link}")
else:
print(f"Download link :\n=> {file.alternate_link}")
print(f"\n[+] Created date : {file.created_date.strftime('%Y/%m/%d %H:%M:%S (UTC)')}")
print(f"[+] Modified date : {file.modified_date.strftime('%Y/%m/%d %H:%M:%S (UTC)')}")
for perm in file.permissions:
if perm.id == "anyoneWithLink":
giving_roles = [perm.role.upper()] + [x.upper() for x in perm.additional_roles if x != perm.role]
print(f"\n[+] Sharing with link enabled ! Giving the role{'s' if len(giving_roles) > 1 else ''} {humanize_list(giving_roles)}.")
#print("\n[Source application]")
gb.rc.print("\n📱 Source application\n", style="deep_pink2")
brand_found = False
brand = None
if file.source_app_id:
print(f"App ID : {file.source_app_id}")
cac = ClientAuthConfigHttp(ghunt_creds)
brand_found, brand = await cac.get_brand(as_client, file.source_app_id)
if brand_found:
print(f"Name : {brand.display_name}")
if brand.home_page_url:
print(f"Home page : {brand.home_page_url}")
else:
gb.rc.print(f"Home page : [italic][bright_black]Not found.[/italic][/bright_black]")
else:
gb.rc.print("Not found.", style="italic")
else:
gb.rc.print("No source application.", style="italic")
if file.image_media_metadata.height and file.image_media_metadata.width:
#print("\n[Image metadata]")
gb.rc.print("\n📸 Image metadata\n", style="light_coral")
print(f"Height : {file.image_media_metadata.height}")
print(f"Width : {file.image_media_metadata.width}")
if isinstance((data := file.image_media_metadata.rotation), int):
print(f"Rotation : {data}")
if file.video_media_metadata.height and file.video_media_metadata.width:
#print("\n[Video metadata]")
gb.rc.print("\n📸 Video metadata\n", style="light_coral")
print(f"Height : {file.video_media_metadata.height}")
print(f"Width : {file.video_media_metadata.width}")
if (data := file.video_media_metadata.duration_millis):
duration = timedelta(milliseconds=int(file.video_media_metadata.duration_millis))
print(f"Duration : {humanize.precisedelta(duration)}")
#print("\n[Parents]")
gb.rc.print("\n📂 Parents\n", style="gold3")
if file.parents:
print(f"[+] Parents folders :")
for parent in file.parents:
print(f"- 📁 {parent.id}{' [Root folder]' if parent.is_root else ''}")
else:
gb.rc.print("No parent folder found.", style="italic")
if is_folder:
#print("\n[Items]")
gb.rc.print("\n🗃️ Items\n", style="gold3")
found, _, drive_childs = await drive.get_childs(as_client, file_id)
if found and drive_childs.items:
count = f"{x if (x := len(drive_childs.items)) < 1000 else '>= 1000'}"
print(f"[+] {count} items found inside this folder !")
else:
gb.rc.print("No items found.", style="italic")
#print("\n[Users]")
gb.rc.print("\n👪 Users\n", style="dark_orange")
users = get_users_from_file(file)
if (owners := [x for x in users if x.role == "owner"]):
print(f"-> 👤 Owner{'s' if len(owners) > 1 else ''}")
for user in owners:
show_user(user)
if (writers := [x for x in users if x.role == "writer"]):
print(f"\n-> 👤 Writer{'s' if len(writers) > 1 else ''}")
for user in writers:
show_user(user)
if (commenters := [x for x in users if x.role == "commenter"]):
print(f"\n-> 👤 Commenter{'s' if len(commenters) > 1 else ''}")
for user in commenters:
show_user(user)
if (readers := [x for x in users if x.role == "reader"]):
print(f"\n-> 👤 Reader{'s' if len(readers) > 1 else ''}")
for user in readers:
show_user(user)
if (nones := [x for x in users if x.role == "none"]):
print(f"\n-> 👤 User{'s' if len(nones) > 1 else ''} with no right")
for user in nones:
show_user(user)
#print("[Comments]")
gb.rc.print("\n🗣️ Comments\n", style="plum2")
comments_found, _, drive_comments = await drive.get_comments(as_client, file_id)
if comments_found and drive_comments.items:
authors = get_comments_from_file(drive_comments)
if len(drive_comments.items) > 20:
print(f"[+] Authors ({len(authors)} found, showing the top 20) :")
else:
print("[+] Authors :")
for _, author in authors[:20]:
print(f"- 🙋 {author['name']} ({author['count']} comment{'s' if author['count'] > 1 else ''})")
else:
gb.rc.print("No comments.", style="italic")
#print("\n[Capabilities]")
gb.rc.print("\n🧙 Capabilities\n", style="dodger_blue1")
capabilities = sorted([k for k,v in inspect.getmembers(file.capabilities) if v and not k.startswith("_")])
if is_folder:
if capabilities == drive_knowledge.default_folder_capabilities:
print("[-] You don't have special permissions against this folder.")
else:
print(f"[+] You have special permissions against this folder ! ✨")
for cap in capabilities:
print(f"- {inflection.humanize(cap)}")
else:
if capabilities == drive_knowledge.default_file_capabilities:
print("[-] You don't have special permissions against this file.")
else:
print(f"[+] You have special permissions against this file ! ✨")
for cap in capabilities:
print(f"- {inflection.humanize(cap)}")
if json_file:
json_results = {
"file": file if file_found else None,
"source_app": brand if brand_found else None,
"users": users,
"comments": drive_comments if comments_found else None
}
import json
from ghunt.objects.encoders import GHuntEncoder
with open(json_file, "w", encoding="utf-8") as f:
f.write(json.dumps(json_results, cls=GHuntEncoder, indent=4))
gb.rc.print(f"\n[+] JSON output wrote to {json_file} !", style="italic")
await as_client.aclose()

169
ghunt/modules/email.py Normal file
View File

@@ -0,0 +1,169 @@
from ghunt import globals as gb
from ghunt.helpers.utils import get_httpx_client
from ghunt.objects.base import GHuntCreds
from ghunt.apis.peoplepa import PeoplePaHttp
from ghunt.apis.vision import VisionHttp
from ghunt.helpers import gmaps, playgames, auth, calendar as gcalendar, ia
from ghunt.helpers.knowledge import get_user_type_definition
import httpx
from typing import *
async def hunt(as_client: httpx.AsyncClient, email_address: str, json_file: str=None):
if not as_client:
as_client = get_httpx_client()
ghunt_creds = GHuntCreds()
ghunt_creds.load_creds()
if not ghunt_creds.are_creds_loaded():
exit("[-] Creds aren't loaded. Are you logged in ?")
if not auth.check_cookies(ghunt_creds.cookies):
exit("[-] Seems like the cookies are invalid. Exiting...")
#gb.rc.print("[+] Target found !", style="sea_green3")
people_pa = PeoplePaHttp(ghunt_creds)
vision_api = VisionHttp(ghunt_creds)
is_found, target = await people_pa.people_lookup(as_client, email_address, params_template="max_details")
if not is_found:
exit("\n[-] The target wasn't found.")
if json_file:
json_results = {}
containers = target.sourceIds
if len(containers) > 1 or "PROFILE" not in containers:
print("\n[!] You have this person in these containers :")
for container in containers:
print(f"- {container.title()}")
if not "PROFILE" in containers:
exit("[-] Given information does not match a public Google Account.")
container = "PROFILE"
gb.rc.print("\n🙋 Google Account data\n", style="plum2")
if container in target.names:
print(f"Name : {target.names[container].fullname}\n")
if container in target.profilePhotos:
if target.profilePhotos[container].isDefault:
print("[-] Default profile picture")
else:
print("[+] Custom profile picture !")
print(f"=> {target.profilePhotos[container].url}")
await ia.detect_face(vision_api, as_client, target.profilePhotos[container].url)
print()
if container in target.coverPhotos:
if target.coverPhotos[container].isDefault:
print("[-] Default cover picture\n")
else:
print("[+] Custom cover picture !")
print(f"=> {target.coverPhotos[container].url}")
await ia.detect_face(vision_api, as_client, target.coverPhotos[container].url)
print()
print(f"Last profile edit : {target.sourceIds[container].lastUpdated.strftime('%Y/%m/%d %H:%M:%S (UTC)')}\n")
if container in target.emails:
print(f"Email : {target.emails[container].value}")
else:
print(f"Email : {email_address}\n")
print(f"Gaia ID : {target.personId}")
if container in target.profileInfos:
print("\nUser types :")
for user_type in target.profileInfos[container].userTypes:
definition = get_user_type_definition(user_type)
gb.rc.print(f"- {user_type} [italic]({definition})[/italic]")
gb.rc.print(f"\n📞 Google Chat Extended Data\n", style="light_salmon3")
#print(f"Presence : {target.extendedData.dynamiteData.presence}")
print(f"Entity Type : {target.extendedData.dynamiteData.entityType}")
#print(f"DND State : {target.extendedData.dynamiteData.dndState}")
gb.rc.print(f"Customer ID : {x if (x := target.extendedData.dynamiteData.customerId) else '[italic]Not found.[/italic]'}")
gb.rc.print(f"\n🌐 Google Plus Extended Data\n", style="cyan")
print(f"Entreprise User : {target.extendedData.gplusData.isEntrepriseUser}")
#print(f"Content Restriction : {target.extendedData.gplusData.contentRestriction}")
if container in target.inAppReachability:
print("\n[+] Activated Google services :")
for app in target.inAppReachability[container].apps:
print(f"- {app}")
gb.rc.print("\n🎮 Play Games data", style="deep_pink2")
player_results = await playgames.search_player(ghunt_creds, as_client, email_address)
if player_results:
player_candidate = player_results[0]
print("\n[+] Found player profile !")
print(f"\nUsername : {player_candidate.name}")
print(f"Player ID : {player_candidate.id}")
print(f"Avatar : {player_candidate.avatar_url}")
_, player = await playgames.get_player(ghunt_creds, as_client, player_candidate.id)
playgames.output(player)
else:
print("\n[-] No player profile found.")
gb.rc.print("\n🗺️ Maps data", style="green4")
err, stats, reviews, photos = await gmaps.get_reviews(as_client, target.personId)
gmaps.output(err, stats, reviews, photos, target.personId)
gb.rc.print("\n🗓️ Calendar data\n", style="slate_blue3")
cal_found, calendar, calendar_events = await gcalendar.fetch_all(ghunt_creds, as_client, email_address)
if cal_found:
print("[+] Public Google Calendar found !\n")
if calendar_events.items:
if "PROFILE" in target.names:
gcalendar.out(calendar, calendar_events, email_address, target.names[container].fullname)
else:
gcalendar.out(calendar, calendar_events, email_address)
else:
print("=> No recent events found.")
else:
print("[-] No public Google Calendar.")
if json_file:
if container == "PROFILE":
json_results[f"{container}_CONTAINER"] = {
"profile": target,
"play_games": player if player_results else None,
"maps": {
"photos": photos,
"reviews": reviews,
"stats": stats
},
"calendar": {
"details": calendar,
"events": calendar_events
} if cal_found else None
}
else:
json_results[f"{container}_CONTAINER"] = {
"profile": target
}
if json_file:
import json
from ghunt.objects.encoders import GHuntEncoder
with open(json_file, "w", encoding="utf-8") as f:
f.write(json.dumps(json_results, cls=GHuntEncoder, indent=4))
gb.rc.print(f"\n[+] JSON output wrote to {json_file} !", style="italic")
await as_client.aclose()

126
ghunt/modules/gaia.py Normal file
View File

@@ -0,0 +1,126 @@
from ghunt import globals as gb
from ghunt.objects.base import GHuntCreds
from ghunt.apis.peoplepa import PeoplePaHttp
from ghunt.apis.vision import VisionHttp
from ghunt.helpers import gmaps, auth, ia
from ghunt.helpers.knowledge import get_user_type_definition
from ghunt.helpers.utils import get_httpx_client
import httpx
from typing import *
async def hunt(as_client: httpx.AsyncClient, gaia_id: str, json_file: str=None):
if not as_client:
as_client = get_httpx_client()
ghunt_creds = GHuntCreds()
ghunt_creds.load_creds()
if not auth.check_cookies(ghunt_creds.cookies):
exit("[-] Seems like the cookies are invalid. Exiting...")
#gb.rc.print("\n[+] Target found !", style="spring_green3")
people_pa = PeoplePaHttp(ghunt_creds)
vision_api = VisionHttp(ghunt_creds)
is_found, target = await people_pa.people(as_client, gaia_id, params_template="max_details")
if not is_found:
exit("\n[-] The target wasn't found.")
if json_file:
json_results = {}
containers = target.sourceIds
if len(containers) > 1 or "PROFILE" not in containers:
print("\n[!] You have this person in these containers :")
for container in containers:
print(f"- {container.title()}")
if not "PROFILE" in containers:
exit("[-] Given information does not match a public Google Account.")
container = "PROFILE"
gb.rc.print("\n🙋 Google Account data\n", style="plum2")
if container in target.names:
print(f"Name : {target.names[container].fullname}\n")
if container in target.profilePhotos:
if target.profilePhotos[container].isDefault:
print("[-] Default profile picture")
else:
print("[+] Custom profile picture !")
print(f"=> {target.profilePhotos[container].url}")
await ia.detect_face(vision_api, as_client, target.profilePhotos[container].url)
print()
if container in target.coverPhotos:
if target.coverPhotos[container].isDefault:
print("[-] Default cover picture\n")
else:
print("[+] Custom cover picture !")
print(f"=> {target.coverPhotos[container].url}")
await ia.detect_face(vision_api, as_client, target.coverPhotos[container].url)
print()
print(f"Last profile edit : {target.sourceIds[container].lastUpdated.strftime('%Y/%m/%d %H:%M:%S (UTC)')}\n")
print(f"Gaia ID : {target.personId}\n")
if container in target.profileInfos:
print("User types :")
for user_type in target.profileInfos[container].userTypes:
definition = get_user_type_definition(user_type)
gb.rc.print(f"- {user_type} [italic]({definition})[/italic]")
gb.rc.print(f"\n📞 Google Chat Extended Data\n", style="light_salmon3")
#print(f"Presence : {target.extendedData.dynamiteData.presence}")
print(f"Entity Type : {target.extendedData.dynamiteData.entityType}")
#print(f"DND State : {target.extendedData.dynamiteData.dndState}")
gb.rc.print(f"Customer ID : {x if (x := target.extendedData.dynamiteData.customerId) else '[italic]Not found.[/italic]'}")
gb.rc.print(f"\n🌐 Google Plus Extended Data\n", style="cyan")
print(f"Entreprise User : {target.extendedData.gplusData.isEntrepriseUser}")
#print(f"Content Restriction : {target.extendedData.gplusData.contentRestriction}")
if container in target.inAppReachability:
print("\n[+] Activated Google services :")
for app in target.inAppReachability[container].apps:
print(f"- {app}")
gb.rc.print("\n🗺️ Maps data", style="green4")
err, stats, reviews, photos = await gmaps.get_reviews(as_client, target.personId)
gmaps.output(err, stats, reviews, photos, target.personId)
if json_file:
if container == "PROFILE":
json_results[f"{container}_CONTAINER"] = {
"profile": target,
"maps": {
"photos": photos,
"reviews": reviews,
"stats": stats
}
}
else:
json_results[f"{container}_CONTAINER"] = {
"profile": target
}
if json_file:
import json
from ghunt.objects.encoders import GHuntEncoder
with open(json_file, "w", encoding="utf-8") as f:
f.write(json.dumps(json_results, cls=GHuntEncoder, indent=4))
gb.rc.print(f"\n[+] JSON output wrote to {json_file} !", style="italic")
await as_client.aclose()

94
ghunt/modules/login.py Normal file
View File

@@ -0,0 +1,94 @@
from typing import *
import httpx
from pathlib import Path
from ghunt import globals as gb
from ghunt.helpers.utils import *
from ghunt.helpers.auth import *
from ghunt.objects.base import GHuntCreds
async def check_and_login(as_client: httpx.AsyncClient, clean: bool=False) -> None:
"""Check the users credentials validity, and generate new ones."""
ghunt_creds = GHuntCreds()
if clean:
creds_path = Path(ghunt_creds.creds_path)
if creds_path.is_file():
creds_path.unlink()
print(f"[+] Credentials file at {creds_path} deleted !")
else:
print(f"Credentials file at {creds_path} doesn't exists, no need to delete.")
exit()
if not as_client:
as_client = get_httpx_client()
ghunt_creds.load_creds()
wanted_cookies = {"SID": "", "SSID": "", "APISID": "", "SAPISID": "", "HSID": "", "LSID": "", "__Secure-3PSID": ""}
default_cookies = {"CONSENT": gb.config.default_consent_cookie, "PREF": gb.config.default_pref_cookie}
osids = ["cloudconsole", "cl"] # OSIDs to generate
new_cookies_entered = False
if not ghunt_creds.are_creds_loaded():
cookies, oauth_token = await getting_cookies_dialog(wanted_cookies)
cookies = {**cookies, **default_cookies}
new_cookies_entered = True
else:
# in case user wants to enter new cookies (example: for new account)
are_cookies_valid = check_cookies(ghunt_creds.cookies)
if are_cookies_valid:
print("\n[+] The cookies seems valid !")
are_osids_valid = check_osids(ghunt_creds.cookies, ghunt_creds.osids)
if are_osids_valid:
print("[+] The OSIDs seems valid !")
else:
print("[-] Seems like the OSIDs are invalid.")
else:
print("[-] Seems like the cookies are invalid.")
is_master_token_valid = await check_master_token(as_client, ghunt_creds.android.master_token)
if is_master_token_valid:
print("[+] The master token seems valid !")
else:
print("[-] Seems like the master token is invalid.")
new_gen_inp = input("\nDo you want to input new cookies ? (y/N) ").lower()
if new_gen_inp == "y":
cookies, oauth_token = await getting_cookies_dialog(wanted_cookies)
new_cookies_entered = True
elif not are_cookies_valid:
await exit("Please put valid cookies. Exiting...")
# Validate cookies
if new_cookies_entered or not ghunt_creds.are_creds_loaded():
are_cookies_valid = check_cookies(cookies)
if are_cookies_valid:
print("\n[+] The cookies seems valid !")
else:
await exit("\n[-] Seems like the cookies are invalid, try regenerating them.")
if not new_cookies_entered:
exit()
print(f"\n[+] Got OAuth2 token => {oauth_token}")
master_token, services, owner_email, owner_name = await android_master_auth(as_client, oauth_token)
print("\n[Connected account]")
print(f"Name : {owner_name}")
print(f"Email : {owner_email}")
gb.rc.print("\n🔑 [underline]A master token has been generated for your account and saved in the credentials file[/underline], please keep it safe as if it were your password, because it gives access to a lot of Google services, and with that, your personal information.", style="bold")
print(f"Master token services access : {', '.join(services)}")
# Feed the GHuntCreds object
ghunt_creds.cookies = cookies
ghunt_creds.android.master_token = master_token
print("Generating OSIDs ...")
ghunt_creds.osids = await gen_osids(cookies, osids)
ghunt_creds.save_creds()
await as_client.aclose()
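A minimal driver sketch for this module, assuming the ghunt package is importable and a trio event loop (which the trio.Lock usage in ghunt/objects/apis.py below suggests):

    import httpx
    import trio

    from ghunt.modules.login import check_and_login

    async def main():
        as_client = httpx.AsyncClient()
        # check_and_login closes the client itself (see aclose() above).
        await check_and_login(as_client)

    trio.run(main)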

152
ghunt/objects/apis.py Normal file
View File

@@ -0,0 +1,152 @@
from ghunt.errors import GHuntCorruptedHeadersError
from ghunt.helpers.knowledge import get_origin_of_key, get_api_key
from ghunt.objects.base import GHuntCreds, SmartObj
from ghunt.helpers.utils import *
from ghunt.errors import *
from ghunt.helpers.auth import *
from ghunt import globals as gb # used below for gb.rc; may already arrive via the wildcard imports
import httpx
import trio
from datetime import datetime, timezone
from typing import *
# API objects
class EndpointConfig(SmartObj):
def __init__(self, headers: Dict[str, str], cookies: str):
self.headers = headers
self.cookies = cookies
class GAPI(SmartObj):
def __init__(self):
self.loaded_endpoints: Dict[str, EndpointConfig] = {}
self.creds: GHuntCreds = None
self.headers: Dict[str, str] = {}
self.cookies: Dict[str, str] = {}
self.gen_token_lock: trio.Lock = None
self.authentication_mode: str = ""
self.require_key: str = ""
self.key_origin: str = ""
def _load_api(self, creds: GHuntCreds, headers: Dict[str, str]):
if not creds.are_creds_loaded():
raise GHuntInsufficientCreds(f"This API requires a loaded GHuntCreds object, but it is not.")
if not is_headers_syntax_good(headers):
raise GHuntCorruptedHeadersError(f"The headers provided when loading the endpoint seem corrupted, please check them: {headers}")
if self.authentication_mode == "oauth":
self.gen_token_lock = trio.Lock()
cookies = {}
if self.authentication_mode in ["sapisidhash", "cookies_only"]:
if not (cookies := creds.cookies):
raise GHuntInsufficientCreds(f"This endpoint requires the cookies in the GHuntCreds object, but they aren't loaded.")
if (key_name := self.require_key):
if not (api_key := get_api_key(key_name)):
raise GHuntInsufficientCreds(f"This API requires the {key_name} API key in the GHuntCreds object, but it isn't loaded.")
if not self.key_origin:
self.key_origin = get_origin_of_key(key_name)
headers = {**headers, "X-Goog-Api-Key": api_key, **headers, "Origin": self.key_origin, "Referer": self.key_origin}
if self.authentication_mode == "sapisidhash":
if not (sapisidhash := creds.cookies.get("SAPISID")):
raise GHuntInsufficientCreds(f"This endpoint requires the SAPISID cookie in the GHuntCreds object, but it isn't loaded.")
headers = {**headers, "Authorization": f"SAPISIDHASH {gen_sapisidhash(sapisidhash, self.key_origin)}"}
self.creds = creds
self.headers = headers
self.cookies = cookies
def _load_endpoint(self, endpoint_name: str,
headers: Dict[str, str]={}, ext_metadata: Dict[str, str]={}):
if endpoint_name in self.loaded_endpoints:
return
headers = {**headers, **self.headers}
# https://github.com/googleapis/googleapis/blob/f8a290120b3a67e652742a221f73778626dc3081/google/api/context.proto#L43
for ext_type,ext_value in ext_metadata.items():
ext_bin_headers = {f"X-Goog-Ext-{k}-{ext_type.title()}":v for k,v in ext_value.items()}
headers = {**headers, **ext_bin_headers}
if not is_headers_syntax_good(headers):
raise GHuntCorruptedHeadersError(f"The headers provided when loading the endpoint seem corrupted, please check them: {headers}")
self.loaded_endpoints[endpoint_name] = EndpointConfig(headers, self.cookies)
async def _check_and_gen_authorization_token(self, as_client: httpx.AsyncClient, creds: GHuntCreds):
async with self.gen_token_lock:
present = False
if self.api_name in creds.android.authorization_tokens:
present = True
token = creds.android.authorization_tokens[self.api_name]["token"]
expiry_date = datetime.utcfromtimestamp(creds.android.authorization_tokens[self.api_name]["expiry"]).replace(tzinfo=timezone.utc)
# If there is no registered authorization token for the current API, or if the token has expired
if (self.api_name not in creds.android.authorization_tokens) or (present and datetime.now(timezone.utc) > expiry_date):
token, _, expiry_timestamp = await android_oauth_app(as_client, creds.android.master_token, self.package_name, self.scopes)
creds.android.authorization_tokens[self.api_name] = {
"token": token,
"expiry": expiry_timestamp
}
creds.save_creds(silent=True)
gb.rc.print(f"\n[+] New token for {self.api_name} has been generated", style="italic")
return token
async def _query(self, as_client: httpx.AsyncClient, verb: str, endpoint_name: str, base_url: str, params: Dict[str, Any], data: Any, data_type: str) -> httpx.Response:
endpoint = self.loaded_endpoints[endpoint_name]
headers = endpoint.headers
if self.authentication_mode == "oauth":
token = await self._check_and_gen_authorization_token(as_client, self.creds)
headers = {**headers, "Authorization": f"OAuth {token}"}
if verb == "GET":
req = await as_client.get(f"{self.scheme}://{self.hostname}{base_url}",
params=params, headers=headers, cookies=endpoint.cookies)
elif verb == "POST":
if data_type == "data":
req = await as_client.post(f"{self.scheme}://{self.hostname}{base_url}",
params=params, data=data, headers=headers, cookies=endpoint.cookies)
elif data_type == "json":
req = await as_client.post(f"{self.scheme}://{self.hostname}{base_url}",
params=params, json=data, headers=headers, cookies=endpoint.cookies)
else:
raise GHuntUnknownRequestDataTypeError(f"The provided data type {data_type} wasn't recognized by GHunt.")
else:
raise GHuntUnknownVerbError(f"The provided verb {verb} wasn't recognized by GHunt.")
return req
# Others
class Parser(SmartObj):
def _merge(self, obj) -> Any:
"""Merge two objects of the same class, recursively and in place."""
def recursive_merge(obj1, obj2, module_name: str) -> Any:
directions = [(obj1, obj2), (obj2, obj1)]
for direction in directions:
from_obj, target_obj = direction
for attr_name, attr_value in from_obj.__dict__.items():
class_name = get_class_name(attr_value)
if class_name.startswith(module_name) and attr_name in target_obj.__dict__:
merged_obj = recursive_merge(attr_value, target_obj.__dict__[attr_name], module_name)
target_obj.__dict__[attr_name] = merged_obj
elif not attr_name in target_obj.__dict__ or \
(attr_value and not target_obj.__dict__.get(attr_name)):
target_obj.__dict__[attr_name] = attr_value
return obj1
class_name = get_class_name(self)
module_name = self.__module__
if not get_class_name(obj).startswith(class_name):
raise GHuntObjectsMergingError("The two objects being merged aren't from the same class.")
return recursive_merge(self, obj, module_name) # mutates self in place and returns it
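gen_sapisidhash(), used by _load_api() above, is not part of this diff. Assuming it follows the widely documented SAPISIDHASH construction (a SHA-1 over a timestamp, the SAPISID cookie and the origin), a sketch would be:

    import hashlib
    import time

    def sapisidhash_sketch(sapisid: str, origin: str) -> str:
        # Widely documented construction; treat this as an assumption, since
        # GHunt's own gen_sapisidhash() is defined elsewhere in the tree.
        timestamp = str(int(time.time()))
        digest = hashlib.sha1(f"{timestamp} {sapisid} {origin}".encode()).hexdigest()
        return f"{timestamp}_{digest}"

    # Sent as: Authorization: SAPISIDHASH <timestamp>_<digest>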

130
ghunt/objects/base.py Normal file
View File

@@ -0,0 +1,130 @@
from typing import *
from pathlib import Path
import json
from dateutil.relativedelta import relativedelta
from datetime import datetime
import base64
from autoslot import Slots
from ghunt import globals as gb
class SmartObj(Slots):
pass
class AndroidCreds(SmartObj):
def __init__(self) -> None:
self.master_token: str = ""
self.authorization_tokens: Dict = {}
class GHuntCreds(SmartObj):
"""
This object stores all the needed credentials that GHunt uses,
such as cookies, OSIDs, keys and tokens.
"""
def __init__(self, creds_path: str = "") -> None:
self.cookies: Dict[str, str] = {}
self.osids: Dict[str, str] = {}
self.android: AndroidCreds = AndroidCreds()
if not creds_path:
home_path = Path.home()
ghunt_folder = home_path / ".malfrats/ghunt"
if not ghunt_folder.is_dir():
ghunt_folder.mkdir(parents=True, exist_ok=True)
creds_path = ghunt_folder / "creds.m"
self.creds_path: str = creds_path
def are_creds_loaded(self) -> bool:
return all([self.cookies, self.osids, self.android.master_token])
def load_creds(self, silent=False) -> None:
"""Loads cookies, OSIDs and tokens if they exist"""
if Path(self.creds_path).is_file():
try:
with open(self.creds_path, "r", encoding="utf-8") as f:
raw = f.read()
data = json.loads(base64.b64decode(raw).decode())
self.cookies = data["cookies"]
self.osids = data["osids"]
self.android.master_token = data["android"]["master_token"]
self.android.authorization_tokens = data["android"]["authorization_tokens"]
if not silent:
gb.rc.print("[+] Authenticated !", style="sea_green3")
except Exception:
if not silent:
print("[-] Stored cookies are corrupted\n")
else:
if not silent:
print("[-] No stored cookies found\n")
def save_creds(self, silent=False):
"""Save cookies, OSIDs and tokens to the specified file."""
data = {
"cookies": self.cookies,
"osids": self.osids,
"android": {
"master_token": self.android.master_token,
"authorization_tokens": self.android.authorization_tokens
}
}
with open(self.creds_path, "w", encoding="utf-8") as f:
f.write(base64.b64encode(json.dumps(data, indent=2).encode()).decode())
if not silent:
print(f"\n[+] Creds have been saved in {self.creds_path} !")
### Maps
class Position(SmartObj):
def __init__(self):
self.latitude: float = 0.0
self.longitude: float = 0.0
class MapsGuidedAnswer(SmartObj):
def __init__(self):
self.id: str = ""
self.question: str = ""
self.answer: str = ""
class MapsLocation(SmartObj):
def __init__(self):
self.id: str = ""
self.name: str = ""
self.address: str = ""
self.position: Position = Position()
self.tags: List[str] = []
self.types: List[str] = []
self.cost: int = 0 # 1-4
class MapsReview(SmartObj):
def __init__(self):
self.id: str = ""
self.comment: str = ""
self.rating: int = 0
self.location: MapsLocation = MapsLocation()
self.guided_answers: List[MapsGuidedAnswer] = []
self.approximative_date: relativedelta = None
class MapsPhoto(SmartObj):
def __init__(self):
self.id: str = ""
self.url: str = ""
self.location: MapsLocation = MapsLocation()
self.approximative_date: relativedelta = None
self.exact_date: datetime = None
### Drive
class DriveExtractedUser(SmartObj):
def __init__(self):
self.gaia_id: str = ""
self.name: str = ""
self.email_address: str = ""
self.role: str = ""
self.is_last_modifying_user: bool = False
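Since save_creds() wraps the JSON in base64, the stored blob can be inspected by hand. A sketch, assuming the default path built in GHuntCreds.__init__ above:

    import base64
    import json
    from pathlib import Path

    creds_path = Path.home() / ".malfrats/ghunt" / "creds.m"
    data = json.loads(base64.b64decode(creds_path.read_text()))

    # Print cookie names only; the values are session secrets.
    print(sorted(data["cookies"]))
    print("master token set:", bool(data["android"]["master_token"]))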

18
ghunt/objects/encoders.py Normal file
View File

@@ -0,0 +1,18 @@
import json
from datetime import datetime
class GHuntEncoder(json.JSONEncoder):
"""
Converts non-default types when exporting to JSON.
"""
def default(self, o: object):
if isinstance(o, set):
return list(o)
elif isinstance(o, datetime):
return f"{o.strftime('%Y/%m/%d %H:%M:%S')} (UTC)"
elif hasattr(o, "__dict__"):
return o.__dict__
elif hasattr(o, "__slots__"):
return {x: getattr(o, x) for x in o.__slots__}
return super().default(o) # raise the standard TypeError for anything else
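A quick sanity check of the encoder on a set and a datetime (hypothetical payload, not GHunt output):

    import json
    from datetime import datetime

    from ghunt.objects.encoders import GHuntEncoder

    payload = {"ids": {"a", "b"}, "fetched": datetime(2022, 12, 3, 20, 50, 37)}
    # The set becomes a JSON list, the datetime "2022/12/03 20:50:37 (UTC)".
    print(json.dumps(payload, cls=GHuntEncoder, indent=4))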

20
ghunt/objects/utils.py Normal file
View File

@@ -0,0 +1,20 @@
from ghunt.helpers.utils import *
from ghunt.errors import *
from ghunt.objects.base import SmartObj
from typing import *
class TMPrinter(SmartObj):
def __init__(self):
self.max_len = 0
def out(self, text: str):
if len(text) > self.max_len:
self.max_len = len(text)
else:
text += (" " * (self.max_len - len(text)))
print(text, end='\r')
def clear(self):
print(" " * self.max_len, end="\r")

165
ghunt/parsers/calendar.py Normal file
View File

@@ -0,0 +1,165 @@
from datetime import datetime
from typing import *
from ghunt.helpers.utils import get_datetime_utc
from ghunt.objects.apis import Parser
class ConferenceProperties(Parser):
def __init__(self):
self.allowed_conference_solution_types: List[str] = []
def _scrape(self, conference_props_data: Dict[str, any]):
if (types := conference_props_data.get("allowedConferenceSolutionTypes")):
self.allowed_conference_solution_types = types
class Calendar(Parser):
def __init__(self):
self.id: str = ""
self.summary: str = ""
self.time_zone: str = ""
self.conference_properties: ConferenceProperties = ConferenceProperties()
def _scrape(self, calendar_data: Dict[str, any]):
self.id = calendar_data.get("id")
self.summary = calendar_data.get("summary")
self.time_zone = calendar_data.get("timeZone")
conference_props_data = calendar_data.get("conferenceProperties")
if conference_props_data:
self.conference_properties._scrape(conference_props_data)
class CalendarReminder(Parser):
def __init__(self):
self.method: str = ""
self.minutes: int = 0
def _scrape(self, reminder_data: Dict[str, any]):
self.method = reminder_data.get("method")
self.minutes = reminder_data.get("minutes")
class CalendarPerson(Parser):
def __init__(self):
self.email: str = ""
self.display_name: str = ""
self.self: bool = None
def _scrape(self, person_data: Dict[str, any]):
self.email = person_data.get("email")
self.display_name = person_data.get("displayName")
self.self = person_data.get("self")
class CalendarTime(Parser):
def __init__(self):
self.date_time: datetime = None # ISO Format
self.time_zone: str = ""
def _scrape(self, time_data: Dict[str, any]):
if (date_time := time_data.get("dateTime")):
try:
self.date_time = get_datetime_utc(date_time)
except ValueError:
self.date_time = None
self.time_zone = time_data.get("timeZone")
class CalendarReminders(Parser):
def __init__(self):
self.use_default: int = 0
self.overrides: List[CalendarReminder] = []
def _scrape(self, reminders_data: Dict[str, any]):
self.use_default = reminders_data.get("useDefault")
if (overrides := reminders_data.get("overrides")):
for reminder_data in overrides:
reminder = CalendarReminder()
reminder._scrape(reminder_data)
self.overrides.append(reminder)
class CalendarEvent(Parser):
def __init__(self):
self.id: str = ""
self.status: str = ""
self.html_link: str = ""
self.created: datetime = "" # ISO Format
self.updated: datetime = "" # ISO Format
self.summary: str = ""
self.description: str = ""
self.location: str = ""
self.creator: CalendarPerson = CalendarPerson()
self.organizer: CalendarPerson = CalendarPerson()
self.start: CalendarTime = CalendarTime()
self.end: CalendarTime = CalendarTime()
self.recurring_event_id: str = ""
self.original_start_time: CalendarTime = CalendarTime()
self.visibility: str = ""
self.ical_uid: str = ""
self.sequence: int = 0
self.guest_can_invite_others: bool = None
self.reminders: CalendarReminders = CalendarReminders()
self.event_type: str = ""
def _scrape(self, event_data: Dict[str, any]):
self.id = event_data.get("id")
self.status = event_data.get("status")
self.html_link = event_data.get("htmlLink")
if (date_time := event_data.get("created")):
try:
self.created = get_datetime_utc(date_time)
except ValueError:
self.created = None
if (date_time := event_data.get("updated")):
try:
self.updated = get_datetime_utc(date_time)
except ValueError:
self.updated = None
self.summary = event_data.get("summary")
self.description = event_data.get("description")
self.location = event_data.get("location")
if (creator_data := event_data.get("creator")):
self.creator._scrape(creator_data)
if (organizer_data := event_data.get("organizer")):
self.organizer._scrape(organizer_data)
if (start_data := event_data.get("start")):
self.start._scrape(start_data)
if (end_data := event_data.get("end")):
self.end._scrape(end_data)
self.recurring_event_id = event_data.get("recurringEventId")
if (original_start_data := event_data.get("originalStartTime")):
self.original_start_time._scrape(original_start_data)
self.visibility = event_data.get("visibility")
self.ical_uid = event_data.get("iCalUID")
self.sequence = event_data.get("sequence")
self.guest_can_invite_others = event_data.get("guestsCanInviteOthers")
if (reminders_data := event_data.get("reminders")):
self.reminders._scrape(reminders_data)
self.event_type = event_data.get("eventType")
class CalendarEvents(Parser):
def __init__(self):
self.summary: str = ""
self.updated: datetime = "" # ISO Format
self.time_zone: str = ""
self.access_role: str = ""
self.default_reminders: List[CalendarReminder] = []
self.next_page_token: str = ""
self.items: List[CalendarEvent] = []
def _scrape(self, events_data: Dict[str, any]):
self.summary = events_data.get("summary")
if (date_time := events_data.get("updated")):
try:
self.updated = get_datetime_utc(date_time)
except ValueError:
self.updated = None
self.time_zone = events_data.get("timeZone")
self.access_role = events_data.get("accessRole")
if (reminders_data := events_data.get("defaultReminders")):
for reminder_data in reminders_data:
reminder = CalendarReminder()
reminder._scrape(reminder_data)
self.default_reminders.append(reminder)
self.next_page_token = events_data.get("nextPageToken")
if (items_data := events_data.get("items")):
for item_data in items_data:
event = CalendarEvent()
event._scrape(item_data)
self.items.append(event)
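Feeding the parser a minimal, hypothetical payload shaped like a Calendar API events.list response shows the intended use:

    from ghunt.parsers.calendar import CalendarEvents

    sample = {
        "summary": "example@gmail.com",
        "timeZone": "Europe/Paris",
        "items": [{"id": "evt1", "status": "confirmed", "summary": "Standup"}],
    }
    events = CalendarEvents()
    events._scrape(sample)
    print(events.items[0].summary)  # -> Standup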

View File

@@ -0,0 +1,205 @@
from typing import *
from ghunt.objects.apis import Parser
class CacBrand(Parser):
def __init__(self):
self.brand_id: str = ""
self.project_ids: List[list] = []
self.project_numbers: List[str] = []
self.display_name: str = ""
self.icon_url: str = ""
self.stored_icon_url: str = ""
self.support_email: str = ""
self.home_page_url: str = ""
self.terms_of_service_urls: List[list] = []
self.privacy_policy_urls: List[list] = []
self.direct_notice_to_parents_url: str = ""
self.brand_state: CacBrandState = CacBrandState()
self.clients: List[list] = []
self.review: CacReview = CacReview()
self.is_org_internal: bool = False
self.risc_configuration: CacRiscConfiguration = CacRiscConfiguration()
self.consistency_token: str = ""
self.creation_time: str = ""
self.verified_brand: CacVerifiedBrand = CacVerifiedBrand()
def _scrape(self, base_model_data: Dict[str, any]):
self.brand_id = base_model_data.get('brandId')
self.project_ids = base_model_data.get('projectIds')
self.project_numbers = base_model_data.get('projectNumbers')
self.display_name = base_model_data.get('displayName')
self.icon_url = base_model_data.get('iconUrl')
self.stored_icon_url = base_model_data.get('storedIconUrl')
self.support_email = base_model_data.get('supportEmail')
self.home_page_url = base_model_data.get('homePageUrl')
self.terms_of_service_urls = base_model_data.get('termsOfServiceUrls')
self.privacy_policy_urls = base_model_data.get('privacyPolicyUrls')
self.direct_notice_to_parents_url = base_model_data.get('directNoticeToParentsUrl')
if (brand_state_data := base_model_data.get('brandState')):
self.brand_state._scrape(brand_state_data)
self.clients = base_model_data.get('clients')
if (review_data := base_model_data.get('review')):
self.review._scrape(review_data)
self.is_org_internal = base_model_data.get('isOrgInternal')
if (risc_configuration_data := base_model_data.get('riscConfiguration')):
self.risc_configuration._scrape(risc_configuration_data)
self.consistency_token = base_model_data.get('consistencyToken')
self.creation_time = base_model_data.get('creationTime')
if (verified_brand_data := base_model_data.get('verifiedBrand')):
self.verified_brand._scrape(verified_brand_data)
class CacBrandState(Parser):
def __init__(self):
self.state: str = ""
self.admin_id: str = ""
self.reason: str = ""
self.limits: CacLimits = CacLimits()
self.brand_setup: str = ""
self.creation_flow: str = ""
self.update_timestamp: str = ""
def _scrape(self, brand_state_data: Dict[str, any]):
self.state = brand_state_data.get('state')
self.admin_id = brand_state_data.get('adminId')
self.reason = brand_state_data.get('reason')
if (limits_data := brand_state_data.get('limits')):
self.limits._scrape(limits_data)
self.brand_setup = brand_state_data.get('brandSetup')
self.creation_flow = brand_state_data.get('creationFlow')
self.update_timestamp = brand_state_data.get('updateTimestamp')
class CacLimits(Parser):
def __init__(self):
self.approval_quota_multiplier: int = 0
self.max_domain_count: int = 0
self.default_max_client_count: int = 0
def _scrape(self, limits_data: Dict[str, int]):
self.approval_quota_multiplier = limits_data.get('approvalQuotaMultiplier')
self.max_domain_count = limits_data.get('maxDomainCount')
self.default_max_client_count = limits_data.get('defaultMaxClientCount')
class CacReview(Parser):
def __init__(self):
self.has_abuse_verdict: bool = False
self.is_published: bool = False
self.review_state: str = ""
self.high_risk_scopes_privilege: str = ""
self.low_risk_scopes: List[list] = []
self.pending_scopes: List[list] = []
self.exempt_scopes: List[list] = []
self.approved_scopes: List[list] = []
self.historical_approved_scopes: List[list] = []
self.pending_domains: List[str] = []
self.approved_domains: List[list] = []
self.enforce_request_scopes: bool = False
self.category: List[list] = []
self.decision_timestamp: str = ""
def _scrape(self, review_data: Dict[str, any]):
self.has_abuse_verdict = review_data.get('hasAbuseVerdict')
self.is_published = review_data.get('isPublished')
self.review_state = review_data.get('reviewState')
self.high_risk_scopes_privilege = review_data.get('highRiskScopesPrivilege')
self.low_risk_scopes = review_data.get('lowRiskScopes')
self.pending_scopes = review_data.get('pendingScopes')
self.exempt_scopes = review_data.get('exemptScopes')
self.approved_scopes = review_data.get('approvedScopes')
self.historical_approved_scopes = review_data.get('historicalApprovedScopes')
self.pending_domains = review_data.get('pendingDomains')
self.approved_domains = review_data.get('approvedDomains')
self.enforce_request_scopes = review_data.get('enforceRequestScopes')
self.category = review_data.get('category')
self.decision_timestamp = review_data.get('decisionTimestamp')
class CacRiscConfiguration(Parser):
def __init__(self):
self.enabled: bool = False
self.delivery_method: str = ""
self.receiver_supported_event_type: List[list] = []
self.legal_agreement: List[str] = []
def _scrape(self, risc_configuration_data: Dict[str, any]):
self.enabled = risc_configuration_data.get('enabled')
self.delivery_method = risc_configuration_data.get('deliveryMethod')
self.receiver_supported_event_type = risc_configuration_data.get('receiverSupportedEventType')
self.legal_agreement = risc_configuration_data.get('legalAgreement')
class CacVerifiedBrand(Parser):
def __init__(self):
self.display_name: CacDisplayName = CacDisplayName()
self.stored_icon_url: CacStoredIconUrl = CacStoredIconUrl()
self.support_email: CacSupportEmail = CacSupportEmail()
self.home_page_url: CacHomePageUrl = CacHomePageUrl()
self.privacy_policy_url: CacPrivacyPolicyUrl = CacPrivacyPolicyUrl()
self.terms_of_service_url: CacTermsOfServiceUrl = CacTermsOfServiceUrl()
def _scrape(self, verified_brand_data: Dict[str, any]):
if (display_name_data := verified_brand_data.get('displayName')):
self.display_name._scrape(display_name_data)
if (stored_icon_url_data := verified_brand_data.get('storedIconUrl')):
self.stored_icon_url._scrape(stored_icon_url_data)
if (support_email_data := verified_brand_data.get('supportEmail')):
self.support_email._scrape(support_email_data)
if (home_page_url_data := verified_brand_data.get('homePageUrl')):
self.home_page_url._scrape(home_page_url_data)
if (privacy_policy_url_data := verified_brand_data.get('privacyPolicyUrl')):
self.privacy_policy_url._scrape(privacy_policy_url_data)
if (terms_of_service_url_data := verified_brand_data.get('termsOfServiceUrl')):
self.terms_of_service_url._scrape(terms_of_service_url_data)
class CacDisplayName(Parser):
def __init__(self):
self.value: str = ""
self.reason: str = ""
def _scrape(self, display_name_data: Dict[str, str]):
self.value = display_name_data.get('value')
self.reason = display_name_data.get('reason')
class CacStoredIconUrl(Parser):
def __init__(self):
self.value: str = ""
self.reason: str = ""
def _scrape(self, stored_icon_url_data: Dict[str, str]):
self.value = stored_icon_url_data.get('value')
self.reason = stored_icon_url_data.get('reason')
class CacSupportEmail(Parser):
def __init__(self):
self.value: str = ""
self.reason: str = ""
def _scrape(self, support_email_data: Dict[str, str]):
self.value = support_email_data.get('value')
self.reason = support_email_data.get('reason')
class CacHomePageUrl(Parser):
def __init__(self):
self.value: str = ""
self.reason: str = ""
def _scrape(self, home_page_url_data: Dict[str, str]):
self.value = home_page_url_data.get('value')
self.reason = home_page_url_data.get('reason')
class CacPrivacyPolicyUrl(Parser):
def __init__(self):
self.value: str = ""
self.reason: str = ""
def _scrape(self, privacy_policy_url_data: Dict[str, str]):
self.value = privacy_policy_url_data.get('value')
self.reason = privacy_policy_url_data.get('reason')
class CacTermsOfServiceUrl(Parser):
def __init__(self):
self.value: str = ""
self.reason: str = ""
def _scrape(self, terms_of_service_url_data: Dict[str, str]):
self.value = terms_of_service_url_data.get('value')
self.reason = terms_of_service_url_data.get('reason')
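All of these parsers repeat the same idiom: guard with a walrus assignment, then delegate to the child parser's _scrape(). A condensed, self-contained illustration of the pattern (hypothetical classes, not GHunt's):

    from typing import Any, Dict

    class Inner:
        def __init__(self):
            self.value: str = ""

        def _scrape(self, data: Dict[str, Any]):
            self.value = data.get("value")

    class Outer:
        def __init__(self):
            self.inner: Inner = Inner()

        def _scrape(self, data: Dict[str, Any]):
            # Only descend when the key is present and truthy.
            if (inner_data := data.get("inner")):
                self.inner._scrape(inner_data)

    outer = Outer()
    outer._scrape({"inner": {"value": "ok"}})
    print(outer.inner.value)  # -> ok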

738
ghunt/parsers/drive.py Normal file
View File

@@ -0,0 +1,738 @@
from typing import *
from datetime import datetime
from ghunt.helpers.utils import get_datetime_utc
from ghunt.objects.apis import Parser
class DriveFile(Parser):
def __init__(self):
self.kind: str = ""
self.id: str = ""
self.thumbnail_version: str = ""
self.title: str = ""
self.mime_type: str = ""
self.labels: DriveLabels = DriveLabels()
self.created_date: datetime = None
self.modified_date: datetime = None
self.last_viewed_by_me_date: datetime = None
self.marked_viewed_by_me_date: datetime = None
self.shared_with_me_date: datetime = None
self.recency: datetime = None
self.recency_reason: str = ""
self.version: str = ""
self.parents: List[DriveParentReference] = []
self.user_permission: DrivePermission = DrivePermission()
self.file_extension: str = ""
self.file_size: str = ""
self.quota_bytes_used: str = ""
self.owners: List[DriveUser] = []
self.last_modifying_user: DriveUser = DriveUser()
self.capabilities: DriveCapabilities = DriveCapabilities()
self.copyable: bool = False
self.shared: bool = False
self.explicitly_trashed: bool = False
self.authorized_app_ids: List[str] = []
self.primary_sync_parent_id: str = ""
self.subscribed: bool = False
self.passively_subscribed: bool = False
self.flagged_for_abuse: bool = False
self.abuse_is_appealable: bool = False
self.source_app_id: str = ""
self.spaces: List[str] = []
self.has_thumbnail: bool = False
self.contains_unsubscribed_children: bool = False
self.alternate_link: str = ""
self.icon_link: str = ""
self.copy_requires_writer_permission: bool = False
self.permissions: List[DrivePermission] = []
self.head_revision_id: str = ""
self.video_media_metadata: DriveVideoMediaMetadata = DriveVideoMediaMetadata()
self.has_legacy_blob_comments: bool = False
self.label_info: DriveLabelInfo = DriveLabelInfo()
self.web_content_link: str = ""
self.thumbnail_link: str = ""
self.description: str = ""
self.original_filename: str = ""
self.permissions_summary: DrivePermissionsSummary = DrivePermissionsSummary()
self.full_file_extension: str = ""
self.md5_checksum: str = ""
self.owned_by_me: bool = False
self.writers_can_share: bool = False
self.image_media_metadata: DriveImageMediaMetadata = DriveImageMediaMetadata()
self.is_app_authorized: bool = False
self.link_share_metadata: DriveLinkShareMetadata = DriveLinkShareMetadata()
self.etag: str = ""
self.self_link: str = ""
self.embed_link: str = ""
self.open_with_links: DriveOpenWithLinks = DriveOpenWithLinks()
self.default_open_with_link: str = ""
self.has_child_folders: bool = False
self.owner_names: List[str] = []
self.last_modifying_user_name: str = ""
self.editable: bool = False
self.app_data_contents: bool = False
self.drive_source: DriveSource = DriveSource()
self.source: DriveSource = DriveSource()
self.descendant_of_root: bool = False
self.folder_color: str = ""
self.folder_properties: DriveFolderProperties = DriveFolderProperties()
self.resource_key: str = ""
self.has_augmented_permissions: bool = False
self.ancestor_has_augmented_permissions: bool = False
self.has_visitor_permissions: bool = False
self.primary_domain_name: str = ""
self.organization_display_name: str = ""
self.customer_id: str = ""
self.team_drive_id: str = ""
self.folder_color_rgb: str = ""
def _scrape(self, file_data: Dict[str, any]):
self.kind = file_data.get('kind')
self.id = file_data.get('id')
self.thumbnail_version = file_data.get('thumbnailVersion')
self.title = file_data.get('title')
self.mime_type = file_data.get('mimeType')
if (labels_data := file_data.get('labels')):
self.labels._scrape(labels_data)
if (isodate := file_data.get("createdDate")):
self.created_date = get_datetime_utc(isodate)
if (isodate := file_data.get("modifiedDate")):
self.modified_date = get_datetime_utc(isodate)
if (isodate := file_data.get("lastViewedByMeDate")):
self.last_viewed_by_me_date = get_datetime_utc(isodate)
if (isodate := file_data.get("markedViewedByMeDate")):
self.marked_viewed_by_me_date = get_datetime_utc(isodate)
if (isodate := file_data.get("sharedWithMeDate")):
self.shared_with_me_date = get_datetime_utc(isodate)
if (isodate := file_data.get("recency")):
self.recency = get_datetime_utc(isodate)
self.recency_reason = file_data.get('recencyReason')
self.version = file_data.get('version')
if (parents_data := file_data.get('parents')):
for parents_data_item in parents_data:
parents_item = DriveParentReference()
parents_item._scrape(parents_data_item)
self.parents.append(parents_item)
if (user_permission_data := file_data.get('userPermission')):
self.user_permission._scrape(user_permission_data)
self.file_extension = file_data.get('fileExtension')
self.file_size = file_data.get('fileSize')
self.quota_bytes_used = file_data.get('quotaBytesUsed')
if (owners_data := file_data.get('owners')):
for owners_data_item in owners_data:
owners_item = DriveUser()
owners_item._scrape(owners_data_item)
self.owners.append(owners_item)
if (last_modifying_user_data := file_data.get('lastModifyingUser')):
self.last_modifying_user._scrape(last_modifying_user_data)
if (capabilities_data := file_data.get('capabilities')):
self.capabilities._scrape(capabilities_data)
self.copyable = file_data.get('copyable')
self.shared = file_data.get('shared')
self.explicitly_trashed = file_data.get('explicitlyTrashed')
self.authorized_app_ids = file_data.get('authorizedAppIds')
self.primary_sync_parent_id = file_data.get('primarySyncParentId')
self.subscribed = file_data.get('subscribed')
self.passively_subscribed = file_data.get('passivelySubscribed')
self.flagged_for_abuse = file_data.get('flaggedForAbuse')
self.abuse_is_appealable = file_data.get('abuseIsAppealable')
self.source_app_id = file_data.get('sourceAppId')
self.spaces = file_data.get('spaces')
self.has_thumbnail = file_data.get('hasThumbnail')
self.contains_unsubscribed_children = file_data.get('containsUnsubscribedChildren')
self.alternate_link = file_data.get('alternateLink')
self.icon_link = file_data.get('iconLink')
self.copy_requires_writer_permission = file_data.get('copyRequiresWriterPermission')
if (permissions_data := file_data.get('permissions')):
for permissions_data_item in permissions_data:
permissions_item = DrivePermission()
permissions_item._scrape(permissions_data_item)
self.permissions.append(permissions_item)
self.head_revision_id = file_data.get('headRevisionId')
if (video_media_metadata_data := file_data.get('videoMediaMetadata')):
self.video_media_metadata._scrape(video_media_metadata_data)
self.has_legacy_blob_comments = file_data.get('hasLegacyBlobComments')
if (label_info_data := file_data.get('labelInfo')):
self.label_info._scrape(label_info_data)
self.web_content_link = file_data.get('webContentLink')
self.thumbnail_link = file_data.get('thumbnailLink')
self.description = file_data.get('description')
self.original_filename = file_data.get('originalFilename')
if (permissions_summary_data := file_data.get('permissionsSummary')):
self.permissions_summary._scrape(permissions_summary_data)
self.full_file_extension = file_data.get('fullFileExtension')
self.md5_checksum = file_data.get('md5Checksum')
self.owned_by_me = file_data.get('ownedByMe')
self.writers_can_share = file_data.get('writersCanShare')
if (image_media_metadata_data := file_data.get('imageMediaMetadata')):
self.image_media_metadata._scrape(image_media_metadata_data)
self.is_app_authorized = file_data.get('isAppAuthorized')
if (link_share_metadata_data := file_data.get('linkShareMetadata')):
self.link_share_metadata._scrape(link_share_metadata_data)
self.etag = file_data.get('etag')
self.self_link = file_data.get('selfLink')
self.embed_link = file_data.get('embedLink')
if (open_with_links_data := file_data.get('openWithLinks')):
self.open_with_links._scrape(open_with_links_data)
self.default_open_with_link = file_data.get('defaultOpenWithLink')
self.has_child_folders = file_data.get('hasChildFolders')
self.owner_names = file_data.get('ownerNames')
self.last_modifying_user_name = file_data.get('lastModifyingUserName')
self.editable = file_data.get('editable')
self.app_data_contents = file_data.get('appDataContents')
if (drive_source_data := file_data.get('driveSource')):
self.drive_source._scrape(drive_source_data)
if (source_data := file_data.get('source')):
self.source._scrape(source_data)
self.descendant_of_root = file_data.get('descendantOfRoot')
self.folder_color = file_data.get('folderColor')
if (folder_properties_data := file_data.get('folderProperties')):
self.folder_properties._scrape(folder_properties_data)
self.resource_key = file_data.get('resourceKey')
self.has_augmented_permissions = file_data.get('hasAugmentedPermissions')
self.ancestor_has_augmented_permissions = file_data.get('ancestorHasAugmentedPermissions')
self.has_visitor_permissions = file_data.get('hasVisitorPermissions')
self.primary_domain_name = file_data.get('primaryDomainName')
self.organization_display_name = file_data.get('organizationDisplayName')
self.customer_id = file_data.get('customerId')
self.team_drive_id = file_data.get('teamDriveId')
self.folder_color_rgb = file_data.get('folderColorRgb')
class DriveLabels(Parser):
def __init__(self):
self.starred: bool = False
self.trashed: bool = False
self.restricted: bool = False
self.viewed: bool = False
self.hidden: bool = False
self.modified: bool = False
def _scrape(self, labels_data: Dict[str, bool]):
self.starred = labels_data.get('starred')
self.trashed = labels_data.get('trashed')
self.restricted = labels_data.get('restricted')
self.viewed = labels_data.get('viewed')
self.hidden = labels_data.get('hidden')
self.modified = labels_data.get('modified')
class DriveUserPermission(Parser):
def __init__(self):
self.role: str = ""
self.id: str = ""
self.type: str = ""
def _scrape(self, user_permission_data: Dict[str, str]):
self.role = user_permission_data.get('role')
self.id = user_permission_data.get('id')
self.type = user_permission_data.get('type')
class DriveUser(Parser):
def __init__(self):
self.kind: str = ""
self.id: str = ""
self.permission_id: str = ""
self.email_address_from_account: str = ""
self.display_name: str = ""
self.picture: DrivePicture = DrivePicture()
self.is_authenticated_user: bool = False
self.email_address: str = ""
def _scrape(self, user_data: Dict[str, any]):
self.kind = user_data.get('kind')
self.id = user_data.get('id')
self.permission_id = user_data.get('permissionId')
self.email_address_from_account = user_data.get('emailAddressFromAccount')
self.display_name = user_data.get('displayName')
if (picture_data := user_data.get('picture')):
self.picture._scrape(picture_data)
self.is_authenticated_user = user_data.get('isAuthenticatedUser')
self.email_address = user_data.get('emailAddress')
class DriveCapabilities(Parser):
def __init__(self):
self.can_add_children: bool = False
self.can_add_my_drive_parent: bool = False
self.can_block_owner: bool = False
self.can_change_security_update_enabled: bool = False
self.can_copy: bool = False
self.can_delete: bool = False
self.can_download: bool = False
self.can_edit: bool = False
self.can_edit_category_metadata: bool = False
self.can_request_approval: bool = False
self.can_move_children_within_drive: bool = False
self.can_move_item_into_team_drive: bool = False
self.can_move_item_within_drive: bool = False
self.can_read: bool = False
self.can_read_category_metadata: bool = False
self.can_remove_children: bool = False
self.can_remove_my_drive_parent: bool = False
self.can_rename: bool = False
self.can_share: bool = False
self.can_share_child_files: bool = False
self.can_share_child_folders: bool = False
self.can_trash: bool = False
self.can_untrash: bool = False
self.can_comment: bool = False
self.can_move_item_out_of_drive: bool = False
self.can_add_as_owner: bool = False
self.can_add_as_organizer: bool = False
self.can_add_as_file_organizer: bool = False
self.can_add_as_writer: bool = False
self.can_add_as_commenter: bool = False
self.can_add_as_reader: bool = False
self.can_change_to_owner: bool = False
self.can_change_to_organizer: bool = False
self.can_change_to_file_organizer: bool = False
self.can_change_to_writer: bool = False
self.can_change_to_commenter: bool = False
self.can_change_to_reader: bool = False
self.can_change_to_reader_on_published_view: bool = False
self.can_remove: bool = False
self.can_accept_ownership: bool = False
self.can_add_encrypted_children: bool = False
self.can_change_copy_requires_writer_permission: bool = False
self.can_change_permission_expiration: bool = False
self.can_change_restricted_download: bool = False
self.can_change_writers_can_share: bool = False
self.can_create_decrypted_copy: bool = False
self.can_create_encrypted_copy: bool = False
self.can_list_children: bool = False
self.can_manage_members: bool = False
self.can_manage_visitors: bool = False
self.can_modify_content: bool = False
self.can_modify_content_restriction: bool = False
self.can_modify_labels: bool = False
self.can_print: bool = False
self.can_read_all_permissions: bool = False
self.can_read_labels: bool = False
self.can_read_revisions: bool = False
self.can_set_missing_required_fields: bool = False
self.can_share_as_commenter: bool = False
self.can_share_as_file_organizer: bool = False
self.can_share_as_organizer: bool = False
self.can_share_as_owner: bool = False
self.can_share_as_reader: bool = False
self.can_share_as_writer: bool = False
self.can_share_published_view_as_reader: bool = False
self.can_share_to_all_users: bool = False
self.can_add_folder_from_another_drive: bool = False
self.can_delete_children: bool = False
self.can_move_item_out_of_team_drive: bool = False
self.can_move_item_within_team_drive: bool = False
self.can_move_team_drive_item: bool = False
self.can_read_team_drive: bool = False
self.can_trash_children: bool = False
def _scrape(self, capabilities_data: Dict[str, bool]):
self.can_add_children = capabilities_data.get('canAddChildren')
self.can_add_my_drive_parent = capabilities_data.get('canAddMyDriveParent')
self.can_block_owner = capabilities_data.get('canBlockOwner')
self.can_change_security_update_enabled = capabilities_data.get('canChangeSecurityUpdateEnabled')
self.can_copy = capabilities_data.get('canCopy')
self.can_delete = capabilities_data.get('canDelete')
self.can_download = capabilities_data.get('canDownload')
self.can_edit = capabilities_data.get('canEdit')
self.can_edit_category_metadata = capabilities_data.get('canEditCategoryMetadata')
self.can_request_approval = capabilities_data.get('canRequestApproval')
self.can_move_children_within_drive = capabilities_data.get('canMoveChildrenWithinDrive')
self.can_move_item_into_team_drive = capabilities_data.get('canMoveItemIntoTeamDrive')
self.can_move_item_within_drive = capabilities_data.get('canMoveItemWithinDrive')
self.can_read = capabilities_data.get('canRead')
self.can_read_category_metadata = capabilities_data.get('canReadCategoryMetadata')
self.can_remove_children = capabilities_data.get('canRemoveChildren')
self.can_remove_my_drive_parent = capabilities_data.get('canRemoveMyDriveParent')
self.can_rename = capabilities_data.get('canRename')
self.can_share = capabilities_data.get('canShare')
self.can_share_child_files = capabilities_data.get('canShareChildFiles')
self.can_share_child_folders = capabilities_data.get('canShareChildFolders')
self.can_trash = capabilities_data.get('canTrash')
self.can_untrash = capabilities_data.get('canUntrash')
self.can_comment = capabilities_data.get('canComment')
self.can_move_item_out_of_drive = capabilities_data.get('canMoveItemOutOfDrive')
self.can_add_as_owner = capabilities_data.get('canAddAsOwner')
self.can_add_as_organizer = capabilities_data.get('canAddAsOrganizer')
self.can_add_as_file_organizer = capabilities_data.get('canAddAsFileOrganizer')
self.can_add_as_writer = capabilities_data.get('canAddAsWriter')
self.can_add_as_commenter = capabilities_data.get('canAddAsCommenter')
self.can_add_as_reader = capabilities_data.get('canAddAsReader')
self.can_change_to_owner = capabilities_data.get('canChangeToOwner')
self.can_change_to_organizer = capabilities_data.get('canChangeToOrganizer')
self.can_change_to_file_organizer = capabilities_data.get('canChangeToFileOrganizer')
self.can_change_to_writer = capabilities_data.get('canChangeToWriter')
self.can_change_to_commenter = capabilities_data.get('canChangeToCommenter')
self.can_change_to_reader = capabilities_data.get('canChangeToReader')
self.can_change_to_reader_on_published_view = capabilities_data.get('canChangeToReaderOnPublishedView')
self.can_remove = capabilities_data.get('canRemove')
self.can_accept_ownership = capabilities_data.get('canAcceptOwnership')
self.can_add_encrypted_children = capabilities_data.get('canAddEncryptedChildren')
self.can_change_copy_requires_writer_permission = capabilities_data.get('canChangeCopyRequiresWriterPermission')
self.can_change_permission_expiration = capabilities_data.get('canChangePermissionExpiration')
self.can_change_restricted_download = capabilities_data.get('canChangeRestrictedDownload')
self.can_change_writers_can_share = capabilities_data.get('canChangeWritersCanShare')
self.can_create_decrypted_copy = capabilities_data.get('canCreateDecryptedCopy')
self.can_create_encrypted_copy = capabilities_data.get('canCreateEncryptedCopy')
self.can_list_children = capabilities_data.get('canListChildren')
self.can_manage_members = capabilities_data.get('canManageMembers')
self.can_manage_visitors = capabilities_data.get('canManageVisitors')
self.can_modify_content = capabilities_data.get('canModifyContent')
self.can_modify_content_restriction = capabilities_data.get('canModifyContentRestriction')
self.can_modify_labels = capabilities_data.get('canModifyLabels')
self.can_print = capabilities_data.get('canPrint')
self.can_read_all_permissions = capabilities_data.get('canReadAllPermissions')
self.can_read_labels = capabilities_data.get('canReadLabels')
self.can_read_revisions = capabilities_data.get('canReadRevisions')
self.can_set_missing_required_fields = capabilities_data.get('canSetMissingRequiredFields')
self.can_share_as_commenter = capabilities_data.get('canShareAsCommenter')
self.can_share_as_file_organizer = capabilities_data.get('canShareAsFileOrganizer')
self.can_share_as_organizer = capabilities_data.get('canShareAsOrganizer')
self.can_share_as_owner = capabilities_data.get('canShareAsOwner')
self.can_share_as_reader = capabilities_data.get('canShareAsReader')
self.can_share_as_writer = capabilities_data.get('canShareAsWriter')
self.can_share_published_view_as_reader = capabilities_data.get('canSharePublishedViewAsReader')
self.can_share_to_all_users = capabilities_data.get('canShareToAllUsers')
self.can_add_folder_from_another_drive = capabilities_data.get('canAddFolderFromAnotherDrive')
self.can_delete_children = capabilities_data.get('canDeleteChildren')
self.can_move_item_out_of_team_drive = capabilities_data.get('canMoveItemOutOfTeamDrive')
self.can_move_item_within_team_drive = capabilities_data.get('canMoveItemWithinTeamDrive')
self.can_move_team_drive_item = capabilities_data.get('canMoveTeamDriveItem')
self.can_read_team_drive = capabilities_data.get('canReadTeamDrive')
self.can_trash_children = capabilities_data.get('canTrashChildren')
class DriveVideoMediaMetadata(Parser):
def __init__(self):
self.width: int = 0
self.height: int = 0
self.duration_millis: str = ""
def _scrape(self, video_media_metadata_data: Dict[str, any]):
self.width = video_media_metadata_data.get('width')
self.height = video_media_metadata_data.get('height')
self.duration_millis = video_media_metadata_data.get('durationMillis')
class DriveLabelInfo(Parser):
def __init__(self):
self.label_count: int = 0
self.incomplete: bool = False
def _scrape(self, label_info_data: Dict[str, any]):
self.label_count = label_info_data.get('labelCount')
self.incomplete = label_info_data.get('incomplete')
class DrivePermission(Parser):
def __init__(self):
self.kind: str = ""
self.id: str = ""
self.self_link: str = ""
self.role: str = ""
self.additional_roles: List[str] = []
self.type: str = ""
self.selectable_roles: List[list] = []
self.pending_owner: bool = False
self.with_link: bool = False
self.capabilities: DriveCapabilities = DriveCapabilities()
self.user_id: str = ""
self.name: str = ""
self.email_address: str = ""
self.domain: str = ""
self.photo_link: str = ""
self.deleted: bool = False
self.is_collaborator_account: bool = False
def _scrape(self, permission_data: Dict[str, any]):
self.kind = permission_data.get('kind')
self.id = permission_data.get('id')
self.self_link = permission_data.get('selfLink')
self.role = permission_data.get('role')
self.additional_roles = permission_data.get('additionalRoles', [])
self.type = permission_data.get('type')
self.selectable_roles = permission_data.get('selectableRoles')
self.pending_owner = permission_data.get('pendingOwner')
self.with_link = permission_data.get('withLink')
if (capabilities_data := permission_data.get('capabilities')):
self.capabilities._scrape(capabilities_data)
self.user_id = permission_data.get('userId')
self.name = permission_data.get('name')
self.email_address = permission_data.get('emailAddress')
self.domain = permission_data.get('domain')
self.photo_link = permission_data.get('photoLink')
self.deleted = permission_data.get('deleted')
self.is_collaborator_account = permission_data.get('isCollaboratorAccount')
class DrivePermissionsSummary(Parser):
def __init__(self):
self.entry_count: int = 0
self.visibility: List[DriveMiniPermission] = []
self.select_permissions: List[DrivePermission] = []
def _scrape(self, permissions_summary_data: Dict[str, any]):
self.entry_count = permissions_summary_data.get('entryCount')
if (visibility_data := permissions_summary_data.get('visibility')):
for visibility_data_item in visibility_data:
visibility_item = DriveMiniPermission()
visibility_item._scrape(visibility_data_item)
self.visibility.append(visibility_item)
if (select_permissions_data := permissions_summary_data.get('selectPermissions')):
for select_permissions_data_item in select_permissions_data:
select_permissions_item = DrivePermission()
select_permissions_item._scrape(select_permissions_data_item)
self.select_permissions.append(select_permissions_item)
class DriveMiniPermission(Parser):
def __init__(self):
self.permission_id: str = ""
self.role: str = ""
self.type: str = ""
self.with_link: bool = False
def _scrape(self, unknown_model4_data: Dict[str, any]):
self.permission_id = unknown_model4_data.get('permissionId')
self.role = unknown_model4_data.get('role')
self.type = unknown_model4_data.get('type')
self.with_link = unknown_model4_data.get('withLink')
class DrivePicture(Parser):
def __init__(self):
self.url: str = ""
def _scrape(self, picture_data: Dict[str, str]):
self.url = picture_data.get('url')
class DriveImageMediaMetadata(Parser):
def __init__(self):
self.width: int = 0
self.height: int = 0
self.rotation: int = 0
def _scrape(self, image_media_metadata_data: Dict[str, int]):
self.width = image_media_metadata_data.get('width')
self.height = image_media_metadata_data.get('height')
self.rotation = image_media_metadata_data.get('rotation')
class DriveLinkShareMetadata(Parser):
def __init__(self):
self.security_update_eligible: bool = False
self.security_update_enabled: bool = False
self.security_update_change_disabled_reason: str = ""
self.security_update_explicitly_set: bool = False
def _scrape(self, link_share_metadata_data: Dict[str, any]):
self.security_update_eligible = link_share_metadata_data.get('securityUpdateEligible')
self.security_update_enabled = link_share_metadata_data.get('securityUpdateEnabled')
self.security_update_change_disabled_reason = link_share_metadata_data.get('securityUpdateChangeDisabledReason')
self.security_update_explicitly_set = link_share_metadata_data.get('securityUpdateExplicitlySet')
class DriveChildList(Parser):
def __init__(self):
self.kind: str = ""
self.etag: str = ""
self.self_link: str = ""
self.items: List[DriveChildReference] = []
def _scrape(self, child_list_data: Dict[str, any]):
self.kind = child_list_data.get('kind')
self.etag = child_list_data.get('etag')
self.self_link = child_list_data.get('selfLink')
if (items_data := child_list_data.get('items')):
for items_data_item in items_data:
items_item = DriveChildReference()
items_item._scrape(items_data_item)
self.items.append(items_item)
class DriveChildReference(Parser):
def __init__(self):
self.id: str = ""
self.self_link: str = ""
self.kind: str = ""
self.child_link: str = ""
def _scrape(self, child_reference_data: Dict[str, str]):
self.id = child_reference_data.get('id')
self.self_link = child_reference_data.get('selfLink')
self.kind = child_reference_data.get('kind')
self.child_link = child_reference_data.get('childLink')
class DriveApp(Parser):
def __init__(self):
self.kind: str = ""
self.id: str = ""
self.name: str = ""
self.type: str = ""
self.short_description: str = ""
self.long_description: str = ""
self.supports_create: bool = False
self.supports_import: bool = False
self.supports_multi_open: bool = False
self.supports_offline_create: bool = False
self.supports_mobile_browser: bool = False
self.installed: bool = False
self.authorized: bool = False
self.drive_branded_app: bool = False
self.drive_branded: bool = False
self.hidden: bool = False
self.removable: bool = False
self.has_drive_wide_scope: bool = False
self.use_by_default: bool = False
self.primary_mime_types: List[str] = []
self.requires_authorization_before_open_with: bool = False
self.supports_team_drives: bool = False
self.supports_all_drives: bool = False
def _scrape(self, app_data: Dict[str, any]):
self.kind = app_data.get('kind')
self.id = app_data.get('id')
self.name = app_data.get('name')
self.type = app_data.get('type')
self.short_description = app_data.get('shortDescription')
self.long_description = app_data.get('longDescription')
self.supports_create = app_data.get('supportsCreate')
self.supports_import = app_data.get('supportsImport')
self.supports_multi_open = app_data.get('supportsMultiOpen')
self.supports_offline_create = app_data.get('supportsOfflineCreate')
self.supports_mobile_browser = app_data.get('supportsMobileBrowser')
self.installed = app_data.get('installed')
self.authorized = app_data.get('authorized')
self.drive_branded_app = app_data.get('driveBrandedApp')
self.drive_branded = app_data.get('driveBranded')
self.hidden = app_data.get('hidden')
self.removable = app_data.get('removable')
self.has_drive_wide_scope = app_data.get('hasDriveWideScope')
self.use_by_default = app_data.get('useByDefault')
self.primary_mime_types = app_data.get('primaryMimeTypes')
self.requires_authorization_before_open_with = app_data.get('requiresAuthorizationBeforeOpenWith')
self.supports_team_drives = app_data.get('supportsTeamDrives')
self.supports_all_drives = app_data.get('supportsAllDrives')
class DriveOpenWithLinks(Parser):
def __init__(self):
self.digitsfield: str = ""
def _scrape(self, open_with_links_data: Dict[str, str]):
self.digitsfield = open_with_links_data.get('digits_field')
class DriveParentReference(Parser):
def __init__(self):
self.kind: str = ""
self.id: str = ""
self.self_link: str = ""
self.parent_link: str = ""
self.is_root: bool = False
def _scrape(self, parent_reference_data: Dict[str, any]):
self.kind = parent_reference_data.get('kind')
self.id = parent_reference_data.get('id')
self.self_link = parent_reference_data.get('selfLink')
self.parent_link = parent_reference_data.get('parentLink')
self.is_root = parent_reference_data.get('isRoot')
class DriveSource(Parser):
def __init__(self):
self.client_service_id: str = ""
self.value: str = ""
def _scrape(self, source_data: Dict[str, str]):
self.client_service_id = source_data.get('clientServiceId')
self.value = source_data.get('value')
class DriveFolderProperties(Parser):
def __init__(self):
self.psyncho_root: bool = False
self.psyncho_folder: bool = False
self.machine_root: bool = False
self.arbitrary_sync_folder: bool = False
self.external_media: bool = False
self.photos_and_videos_only: bool = False
def _scrape(self, folder_properties_data: Dict[str, bool]):
self.psyncho_root = folder_properties_data.get('psynchoRoot')
self.psyncho_folder = folder_properties_data.get('psynchoFolder')
self.machine_root = folder_properties_data.get('machineRoot')
self.arbitrary_sync_folder = folder_properties_data.get('arbitrarySyncFolder')
self.external_media = folder_properties_data.get('externalMedia')
self.photos_and_videos_only = folder_properties_data.get('photosAndVideosOnly')
class DriveCommentList(Parser):
def __init__(self):
self.kind: str = ""
self.self_link: str = ""
self.items: List[DriveComment] = []
def _scrape(self, comment_list_data: Dict[str, any]):
self.kind = comment_list_data.get('kind')
self.self_link = comment_list_data.get('selfLink')
if (items_data := comment_list_data.get('items')):
for items_data_item in items_data:
items_item = DriveComment()
items_item._scrape(items_data_item)
self.items.append(items_item)
class DriveComment(Parser):
def __init__(self):
self.comment_id: str = ""
self.kind: str = ""
self.created_date: str = ""
self.modified_date: str = ""
self.file_id: str = ""
self.status: str = ""
self.anchor: str = ""
self.replies: List[DriveCommentReply] = []
self.author: DriveUser = DriveUser()
self.deleted: bool = False
self.html_content: str = ""
self.content: str = ""
self.context: DriveCommentContext = DriveCommentContext()
self.file_title: str = ""
def _scrape(self, comment_data: Dict[str, any]):
self.comment_id = comment_data.get('commentId')
self.kind = comment_data.get('kind')
self.created_date = comment_data.get('createdDate')
self.modified_date = comment_data.get('modifiedDate')
self.file_id = comment_data.get('fileId')
self.status = comment_data.get('status')
self.anchor = comment_data.get('anchor')
if (replies_data := comment_data.get('replies')):
for replies_data_item in replies_data:
replies_item = DriveCommentReply()
replies_item._scrape(replies_data_item)
self.replies.append(replies_item)
if (author_data := comment_data.get('author')):
self.author._scrape(author_data)
self.deleted = comment_data.get('deleted')
self.html_content = comment_data.get('htmlContent')
self.content = comment_data.get('content')
if (context_data := comment_data.get('context')):
self.context._scrape(context_data)
self.file_title = comment_data.get('fileTitle')
class DriveCommentContext(Parser):
def __init__(self):
self.type: str = ""
self.value: str = ""
def _scrape(self, context_data: Dict[str, str]):
self.type = context_data.get('type')
self.value = context_data.get('value')
class DriveCommentReply(Parser):
def __init__(self):
self.reply_id: str = ""
self.kind: str = ""
self.created_date: str = ""
self.modified_date: str = ""
self.author: DriveUser = DriveUser()
self.deleted: bool = False
self.html_content: str = ""
self.content: str = ""
def _scrape(self, comment_reply_data: Dict[str, any]):
self.reply_id = comment_reply_data.get('replyId')
self.kind = comment_reply_data.get('kind')
self.created_date = comment_reply_data.get('createdDate')
self.modified_date = comment_reply_data.get('modifiedDate')
if (author_data := comment_reply_data.get('author')):
self.author._scrape(author_data)
self.deleted = comment_reply_data.get('deleted')
self.html_content = comment_reply_data.get('htmlContent')
self.content = comment_reply_data.get('content')
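A trimmed, hypothetical Drive v2-style file resource exercises the parser above (assuming get_datetime_utc() accepts standard ISO 8601 timestamps, as its use here suggests):

    from ghunt.parsers.drive import DriveFile

    file_data = {
        "id": "1AbCdEf",
        "title": "notes.txt",
        "mimeType": "text/plain",
        "labels": {"starred": True, "trashed": False},
        "createdDate": "2022-12-03T19:50:37.000Z",
    }
    drive_file = DriveFile()
    drive_file._scrape(file_data)
    print(drive_file.title, drive_file.labels.starred, drive_file.created_date)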

176
ghunt/parsers/people.py Normal file
View File

@@ -0,0 +1,176 @@
from typing import *
from datetime import datetime
from ghunt.errors import *
from ghunt.helpers.utils import is_default_profile_pic, unicode_patch
from ghunt.objects.apis import Parser
import httpx
import imagehash
class PersonGplusExtendedData(Parser):
def __init__(self):
self.contentRestriction: str = ""
self.isEntrepriseUser: bool = False
def _scrape(self, gplus_data):
self.contentRestriction = gplus_data.get("contentRestriction")
if (isEnterpriseUser := gplus_data.get("isEnterpriseUser")):
self.isEntrepriseUser = isEnterpriseUser
class PersonDynamiteExtendedData(Parser):
def __init__(self):
self.presence: str = ""
self.entityType: str = ""
self.dndState: str = ""
self.customerId: str = ""
def _scrape(self, dynamite_data):
self.presence = dynamite_data.get("presence")
self.entityType = dynamite_data.get("entityType")
self.dndState = dynamite_data.get("dndState")
if (customerId := dynamite_data.get("organizationInfo", {}).get("customerInfo", {}).
get("customerId", {}).get("customerId")):
self.customerId = customerId
class PersonExtendedData(Parser):
def __init__(self):
self.dynamiteData: PersonDynamiteExtendedData = PersonDynamiteExtendedData()
self.gplusData: PersonGplusExtendedData = PersonGplusExtendedData()
def _scrape(self, extended_data: Dict[str, any]):
if (dynamite_data := extended_data.get("dynamiteExtendedData")):
self.dynamiteData._scrape(dynamite_data)
if (gplus_data := extended_data.get("gplusExtendedData")):
self.gplusData._scrape(gplus_data)
class PersonPhoto(Parser):
def __init__(self):
self.url: str = ""
self.isDefault: bool = False
self.flathash: Optional[str] = None
async def _scrape(self, as_client: httpx.AsyncClient, photo_data: Dict[str, any], photo_type: str):
if photo_type == "profile_photo":
self.url = photo_data.get("url")
self.isDefault, self.flathash = await is_default_profile_pic(as_client, self.url)
elif photo_type == "cover_photo":
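# drop the trailing "=..." size parameter to keep the base image URL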
self.url = '='.join(photo_data.get("imageUrl").split("=")[:-1])
if (isDefault := photo_data.get("isDefault")):
self.isDefault = isDefault
else:
raise GHuntAPIResponseParsingError(f'The provided photo type "{photo_type}" wasn\'t recognized.')
class PersonEmail(Parser):
def __init__(self):
self.value: str = ""
def _scrape(self, email_data: Dict[str, any]):
self.value = email_data.get("value")
class PersonName(Parser):
def __init__(self):
self.fullname: str = ""
self.firstName: str = ""
self.lastName: str = ""
def _scrape(self, name_data: Dict[str, any]):
self.fullname = unicode_patch(x) if (x := name_data.get("displayName")) else None
self.firstName = unicode_patch(x) if (x := name_data.get("givenName")) else None
self.lastName = unicode_patch(x) if (x := name_data.get("familyName")) else None
class PersonProfileInfo(Parser):
def __init__(self):
self.userTypes: List[str] = []
def _scrape(self, profile_data: Dict[str, any]):
if "ownerUserType" in profile_data:
self.userTypes += profile_data.get("ownerUserType")
class PersonSourceIds(Parser):
def __init__(self):
self.lastUpdated: datetime = None
def _scrape(self, source_ids_data: Dict[str, any]):
if (timestamp := source_ids_data.get("lastUpdatedMicros")):
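# lastUpdatedMicros is a microsecond epoch string; its first 10 digits are the seconds part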
self.lastUpdated = datetime.utcfromtimestamp(float(timestamp[:10]))
class PersonInAppReachability(Parser):
def __init__(self):
self.apps: List[str] = []
def _scrape(self, apps_data, container_name: str):
for app in apps_data:
if app["metadata"]["container"] == container_name:
self.apps.append(app["appType"].title())
class PersonContainers(dict):
pass
class Person(Parser):
def __init__(self):
self.personId: str = ""
self.sourceIds: Dict[str, PersonSourceIds] = PersonContainers() # All the fetched containers
self.emails: Dict[str, PersonEmail] = PersonContainers()
self.names: Dict[str, PersonName] = PersonContainers()
self.profileInfos: Dict[str, PersonProfileInfo] = PersonContainers()
self.profilePhotos: Dict[str, PersonPhoto] = PersonContainers()
self.coverPhotos: Dict[str, PersonPhoto] = PersonContainers()
self.inAppReachability: Dict[str, PersonInAppReachability] = PersonContainers()
self.extendedData: PersonExtendedData = PersonExtendedData()
async def _scrape(self, as_client: httpx.AsyncClient, person_data: Dict[str, any]):
self.personId = person_data.get("personId")
if person_data.get("email"):
for email_data in person_data["email"]:
person_email = PersonEmail()
person_email._scrape(email_data)
self.emails[email_data["metadata"]["container"]] = person_email
if person_data.get("name"):
for name_data in person_data["name"]:
person_name = PersonName()
person_name._scrape(name_data)
self.names[name_data["metadata"]["container"]] = person_name
if person_data.get("readOnlyProfileInfo"):
for profile_data in person_data["readOnlyProfileInfo"]:
person_profile = PersonProfileInfo()
person_profile._scrape(profile_data)
self.profileInfos[profile_data["metadata"]["container"]] = person_profile
if (source_ids := person_data.get("metadata", {}).get("identityInfo", {}).get("sourceIds")):
for source_ids_data in source_ids:
person_source_ids = PersonSourceIds()
person_source_ids._scrape(source_ids_data)
self.sourceIds[source_ids_data["container"]] = person_source_ids
if person_data.get("photo"):
for photo_data in person_data["photo"]:
person_photo = PersonPhoto()
await person_photo._scrape(as_client, photo_data, "profile_photo")
self.profilePhotos[photo_data["metadata"]["container"]] = person_photo
if person_data.get("coverPhoto"):
for cover_photo_data in person_data["coverPhoto"]:
person_cover_photo = PersonPhoto()
await person_cover_photo._scrape(as_client, cover_photo_data, "cover_photo")
self.coverPhotos[cover_photo_data["metadata"]["container"]] = person_cover_photo
if (apps_data := person_data.get("inAppReachability")):
containers_names = set()
for app_data in person_data["inAppReachability"]:
containers_names.add(app_data["metadata"]["container"])
for container_name in containers_names:
person_app_reachability = PersonInAppReachability()
person_app_reachability._scrape(apps_data, container_name)
self.inAppReachability[container_name] = person_app_reachability
if (extended_data := person_data.get("extendedData")):
self.extendedData._scrape(extended_data)
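For context, a minimal sketch of driving this parser (the payload is a hypothetical, heavily trimmed People API response; only a person ID and one PROFILE name are filled in):
import asyncio
import httpx

async def demo():
    person_data = {
        "personId": "123456789",  # hypothetical value
        "name": [{"metadata": {"container": "PROFILE"},
                  "displayName": "John Doe", "givenName": "John", "familyName": "Doe"}],
    }
    async with httpx.AsyncClient() as as_client:
        person = Person()
        await person._scrape(as_client, person_data)
        print(person.names["PROFILE"].fullname)  # -> John Doe

asyncio.run(demo())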

364
ghunt/parsers/playgames.py Normal file
View File

@@ -0,0 +1,364 @@
from typing import *
from datetime import datetime
from ghunt.objects.apis import Parser
### Profile
class PlayerProfile(Parser):
def __init__(self):
self.display_name: str = ""
self.id: str = ""
self.avatar_url: str = ""
self.banner_url_portrait: str = ""
self.banner_url_landscape: str = ""
self.gamertag: str = ""
self.last_played_app: PlayerPlayedApp = PlayerPlayedApp()
self.profile_settings: PlayerProfileSettings = PlayerProfileSettings()
self.experience_info: PlayerExperienceInfo = PlayerExperienceInfo()
self.title: str = ""
def _scrape(self, player_data: Dict[str, any]):
self.display_name = player_data.get("playerId")
self.display_name = player_data.get("displayName")
self.avatar_url = player_data.get("avatarImageUrl")
self.banner_url_portrait = player_data.get("bannerUrlPortrait")
self.banner_url_landscape = player_data.get("bannerUrlLandscape")
self.gamertag = player_data.get("gamerTag")
if (last_played_app_data := player_data.get("lastPlayedApp")):
self.last_played_app._scrape(last_played_app_data)
if (profile_settings_data := player_data.get("profileSettings")):
self.profile_settings._scrape(profile_settings_data)
if (experience_data := player_data.get("experienceInfo")):
self.experience_info._scrape(experience_data)
self.title = player_data.get("title")
class PlayerPlayedApp(Parser):
def __init__(self):
self.app_id: str = ""
self.icon_url: str = ""
self.featured_image_url: str = ""
self.app_name: str = ""
self.timestamp_millis: str = ""
def _scrape(self, played_app_data: Dict[str, any]):
self.app_id = played_app_data.get("applicationId")
self.icon_url = played_app_data.get("applicationIconUrl")
self.featured_image_url = played_app_data.get("featuredImageUrl")
self.app_name = played_app_data.get("applicationName")
if (timestamp := played_app_data.get("timeMillis")):
self.timestamp_millis = datetime.utcfromtimestamp(float(timestamp[:10]))
class PlayerExperienceInfo(Parser):
def __init__(self):
self.current_xp: str = ""
self.last_level_up_timestamp_millis: str = ""
self.current_level: PlayerLevel = PlayerLevel()
self.next_level: PlayerLevel = PlayerLevel()
self.total_unlocked_achievements: int = 0
def _scrape(self, experience_data: Dict[str, any]):
self.current_xp = experience_data.get("currentExperiencePoints")
if (timestamp := experience_data.get("lastLevelUpTimestampMillis")):
self.last_level_up_timestamp_millis = datetime.utcfromtimestamp(float(timestamp[:10]))
if (current_level_data := experience_data.get("currentLevel")):
self.current_level._scrape(current_level_data)
if (next_level_data := experience_data.get("nextLevel")):
self.next_level._scrape(next_level_data)
self.total_unlocked_achievements = experience_data.get("totalUnlockedAchievements")
class PlayerLevel(Parser):
def __init__(self):
self.level: int = 0
self.min_xp: str = ""
self.max_xp: str = ""
def _scrape(self, level_data: Dict[str, any]):
self.level = level_data.get("level")
self.min_xp = level_data.get("minExperiencePoints")
self.max_xp = level_data.get("maxExperiencePoints")
class PlayerProfileSettings(Parser):
def __init__(self):
self.profile_visible: bool = False
def _scrape(self, profile_settings_data: Dict[str, any]):
self.profile_visible = profile_settings_data.get("profileVisible")
### Played Applications
class PlayedGames(Parser):
def __init__(self):
self.games: List[PlayGame] = []
def _scrape(self, games_data: Dict[str, any]):
for game_data in games_data:
play_game = PlayGame()
play_game._scrape(game_data)
self.games.append(play_game)
class PlayGame(Parser):
def __init__(self):
self.game_data: PlayGameData = PlayGameData()
self.market_data: PlayGameMarketData = PlayGameMarketData()
self.formatted_last_played_time: str = ""
self.last_played_time_millis: str = ""
self.unlocked_achievement_count: int = 0
def _scrape(self, game_data: Dict[str, any]):
if (games_data := game_data.get("gamesData")):
self.game_data._scrape(games_data)
if (market_data := game_data.get("marketData")):
self.market_data._scrape(market_data)
self.formatted_last_played_time = game_data.get("formattedLastPlayedTime")
if (timestamp := game_data.get("lastPlayedTimeMillis")):
self.last_played_time_millis = datetime.utcfromtimestamp(float(timestamp[:10]))
self.unlocked_achievement_count = game_data.get("unlockedAchievementCount")
class PlayGameMarketData(Parser):
def __init__(self):
self.instances: List[PlayGameMarketInstance] = []
def _scrape(self, market_data: Dict[str, any]):
if (instances_data := market_data.get("instances")):
for instance_data in instances_data:
instance = PlayGameMarketInstance()
instance._scrape(instance_data)
self.instances.append(instance)
class PlayGameMarketInstance(Parser):
def __init__(self):
self.id: str = ""
self.title: str = ""
self.description: str = ""
self.images: List[PlayGameImageAsset] = []
self.developer_name: str = ""
self.categories: List[str] = []
self.formatted_price: str = ""
self.price_micros: str = ""
self.badges: List[PlayGameMarketBadge] = []
self.is_owned: bool = False
self.enabled_features: List[str] = []
self.description_snippet: str = ""
self.rating: PlayGameMarketRating = PlayGameMarketRating()
self.last_updated_timestamp_millis: str = ""
self.availability: str = ""
def _scrape(self, instance_data: Dict[str, any]):
self.id = instance_data.get("id")
self.title = instance_data.get("title")
self.description = instance_data.get("description")
if (images_data := instance_data.get("images")):
for image_data in images_data:
image = PlayGameImageAsset()
image._scrape(image_data)
self.images.append(image)
self.developer_name = instance_data.get("developerName")
self.categories = instance_data.get("categories", [])
self.formatted_price = instance_data.get("formattedPrice")
self.price_micros = instance_data.get("priceMicros")
if (badges_data := instance_data.get("badges")):
for badge_data in badges_data:
badge = PlayGameMarketBadge()
badge._scrape(badge_data)
self.badges.append(badge)
self.is_owned = instance_data.get("isOwned")
self.enabled_features = instance_data.get("enabledFeatures", [])
self.description_snippet = instance_data.get("descriptionSnippet")
if (rating_data := instance_data.get("rating")):
self.rating._scrape(rating_data)
if (timestamp := instance_data.get("lastUpdatedTimestampMillis")):
self.last_updated_timestamp_millis = datetime.utcfromtimestamp(float(timestamp[:10]))
self.availability = instance_data.get("availability")
class PlayGameMarketRating(Parser):
def __init__(self):
self.star_rating: float = 0.0
self.ratings_count: str = ""
def _scrape(self, rating_data: Dict[str, any]):
self.star_rating = rating_data.get("starRating")
self.ratings_count = rating_data.get("ratingsCount")
class PlayGameMarketBadge(Parser):
def __init__(self):
self.badge_type: str = ""
self.title: str = ""
self.description: str = ""
self.images: List[PlayGameImageAsset] = []
def _scrape(self, badge_data: Dict[str, any]):
self.badge_type = badge_data.get("badgeType")
self.title = badge_data.get("title")
self.description = badge_data.get("description")
if (images_data := badge_data.get("images")):
for image_data in images_data:
image = PlayGameImageAsset()
image._scrape(image_data)
self.images.append(image)
class PlayGameData(Parser):
def __init__(self):
self.id: str = ""
self.name: str = ""
self.author: str = ""
self.description: str = ""
self.category: PlayGameCategory = PlayGameCategory()
self.assets: List[PlayGameImageAsset] = []
self.instances: List[PlayGameInstance] = []
self.last_updated_timestamp: str = ""
self.achievement_count: int = 0
self.leaderboard_count: int = 0
self.enabled_features: List[str] = []
self.theme_color: str = ""
def _scrape(self, game_data: Dict[str, any]):
self.id = game_data.get("id")
self.name = game_data.get("name")
self.author = game_data.get("author")
self.description = game_data.get("description")
if (category_data := game_data.get("category")):
self.category._scrape(category_data)
if (assets_data := game_data.get("assets")):
for asset_data in assets_data:
asset = PlayGameImageAsset()
asset._scrape(asset_data)
self.assets.append(asset)
if (instances_data := game_data.get("instances")):
for instance_data in instances_data:
instance = PlayGameInstance()
instance._scrape(instance_data)
self.instances.append(instance)
if (timestamp := game_data.get("lastUpdatedTimestamp")):
self.last_updated_timestamp = datetime.utcfromtimestamp(float(timestamp[:10]))
self.achievement_count = game_data.get("achievement_count")
self.leaderboard_count = game_data.get("leaderboard_count")
self.enabled_features = game_data.get("enabledFeatures", [])
self.theme_color = game_data.get("themeColor")
class PlayGameInstance(Parser):
def __init__(self):
self.platform_type: str = ""
self.name: str = ""
self.turn_based_play: bool = False
self.realtime_play: bool = False
self.android_instance: List[PlayGameAndroidInstance] = []
self.acquisition_uri: str = ""
def _scrape(self, instance_data: Dict[str, any]):
self.platform_type = instance_data.get("platformType")
self.name = instance_data.get("name")
self.turn_based_play = instance_data.get("turnBasedPlay")
self.realtime_play = instance_data.get("realtimePlay")
if (android_instance_data := instance_data.get("androidInstance")):
android_instance = PlayGameAndroidInstance()
android_instance._scrape(android_instance_data)
self.android_instance.append(android_instance)
class PlayGameAndroidInstance(Parser):
def __init__(self):
self.package_name: str = ""
self.enable_piracy_check: bool = False
self.preferred: bool = False
def _scrape(self, android_instance_data: Dict[str, any]):
self.package_name = android_instance_data.get("packageName")
self.enable_piracy_check = android_instance_data.get("enablePiracyCheck")
self.preferred = android_instance_data.get("preferred")
class PlayGameImageAsset(Parser):
def __init__(self):
self.name: str = ""
self.width: str = ""
self.height: str = ""
self.url: str = ""
def _scrape(self, image_data: Dict[str, any]):
self.name = image_data.get("name")
self.width = image_data.get("width")
self.height = image_data.get("height")
self.url = image_data.get("url")
class PlayGameCategory(Parser):
def __init__(self):
self.primary: str = ""
def _scrape(self, category_data: Dict[str, any]):
self.primary = category_data.get("primary")
### Achievements
class PlayerAchievements(Parser):
def __init__(self):
self.achievements: List[PlayerAchievement] = []
def _scrape(self, achievements_data: Dict[str, any]):
achievements_defs: List[PlayerAchievementDefinition] = []
if (achievement_defs_data := achievements_data.get("definitions")):
for achievement_def_data in achievement_defs_data:
achievement_def = PlayerAchievementDefinition()
achievement_def._scrape(achievement_def_data)
achievements_defs.append(achievement_def)
if (achievements_items_data := achievements_data.get("items")):
for achievement_item_data in achievements_items_data:
achievement = PlayerAchievement()
achievement._scrape(achievement_item_data)
for achievement_def in achievements_defs:
if achievement_def.id == achievement.id:
achievement.definition = achievement_def
self.achievements.append(achievement)
class PlayerAchievement(Parser):
def __init__(self):
self.id: str = ""
self.achievement_state: str = ""
self.last_updated_timestamp: Optional[datetime] = None
self.app_id: str = ""
self.xp: str = ""
self.definition: PlayerAchievementDefinition = PlayerAchievementDefinition()
def _scrape(self, achievement_item_data: Dict[str, any]):
self.id = achievement_item_data.get("id")
self.achievement_state = achievement_item_data.get("achievementState")
if (timestamp := achievement_item_data.get("lastUpdatedTimestamp")):
self.last_updated_timestamp = datetime.utcfromtimestamp(float(timestamp[:10]))
self.app_id = achievement_item_data.get("application_id")
self.xp = achievement_item_data.get("experiencePoints")
class PlayerAchievementDefinition(Parser):
def __init__(self):
self.id: str = ""
self.name: str = ""
self.description: str = ""
self.achievement_type: str = ""
self.xp: str = ""
self.revealed_icon_url: str = ""
self.unlocked_icon_url: str = ""
self.initial_state: str = ""
self.is_revealed_icon_url_default: bool = False
self.is_unlocked_icon_url_default: bool = False
self.rarity_percent: float = 0.0
def _scrape(self, achievement_def_data: Dict[str, any]):
self.id = achievement_def_data.get("id")
self.name = achievement_def_data.get("name")
self.description = achievement_def_data.get("description")
self.achievement_type = achievement_def_data.get("achievementType")
self.xp = achievement_def_data.get("experiencePoints")
self.revealed_icon_url = achievement_def_data.get("revealedIconUrl")
self.unlocked_icon_url = achievement_def_data.get("unlockedIconUrl")
self.initial_state = achievement_def_data.get("initialState")
self.is_revealed_icon_url_default = achievement_def_data.get("isRevealedIconUrlDefault")
self.is_unlocked_icon_url_default = achievement_def_data.get("isUnlockedIconUrlDefault")
self.rarity_percent = achievement_def_data.get("rarityParcent")
### Global
class Player(Parser):
def __init__(self, profile: PlayerProfile = None,
played_games: List[PlayGame] = None,
achievements: List[PlayerAchievement] = None):
self.profile: PlayerProfile = profile if profile is not None else PlayerProfile()
self.played_games: List[PlayGame] = played_games if played_games is not None else []
self.achievements: List[PlayerAchievement] = achievements if achievements is not None else []
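A minimal sketch of how these classes fit together (the dicts are hypothetical stand-ins for Play Games API payloads, not real responses):
profile = PlayerProfile()
profile._scrape({"playerId": "g123", "displayName": "Neo"})  # hypothetical payload

games = PlayedGames()
games._scrape([{"marketData": {"instances": [{"title": "Some Game"}]}}])

player = Player(profile=profile, played_games=games.games)
print(player.profile.display_name, len(player.played_games))  # -> Neo 1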

View File

@@ -0,0 +1,43 @@
from typing import *
from ghunt.protos.playgatewaypa.search_player_results_pb2 import PlayerSearchResultsProto
from ghunt.protos.playgatewaypa.get_player_response_pb2 import GetPlayerResponseProto
from ghunt.objects.apis import Parser
class PlayerSearchResult(Parser):
def __init__(self):
self.name: str = ""
self.id: str = ""
self.avatar_url: str = ""
def _scrape(self, player_result_data):
self.name = player_result_data.account.name
self.id = player_result_data.account.id
self.avatar_url = player_result_data.avatar.url
class PlayerSearchResults(Parser):
def __init__(self):
self.results: List[PlayerSearchResult] = []
def _scrape(self, proto_results: PlayerSearchResultsProto):
for player_result_data in proto_results.field1.results.field1.field1.player:
player_search_result = PlayerSearchResult()
player_search_result._scrape(player_result_data)
self.results.append(player_search_result)
class PlayerProfile(Parser):
"""
This parsing is deliberately incomplete; GHunt only uses it
to dump the total played games & achievements counts.
"""
def __init__(self):
self.achievements_count: int = 0
self.played_games_count: int = 0
def _scrape(self, proto_results: GetPlayerResponseProto):
for section in proto_results.field1.results.section:
if section.field3.section_name == "Games":
self.played_games_count = int(section.counter.number)
elif section.field3.section_name == "Achievements":
self.achievements_count = int(section.counter.number)
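For illustration, roughly how the proto-backed parsers above would be used once a play-gateway response body has been received (a sketch; raw_bytes is assumed to hold the raw protobuf payload of the HTTP response):
from ghunt.protos.playgatewaypa.get_player_response_pb2 import GetPlayerResponseProto

proto = GetPlayerResponseProto()
proto.ParseFromString(raw_bytes)  # raw_bytes: protobuf response body (assumption)

profile = PlayerProfile()
profile._scrape(proto)
print(profile.played_games_count, profile.achievements_count)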

94
ghunt/parsers/vision.py Normal file
View File

@@ -0,0 +1,94 @@
from ghunt.objects.apis import Parser
from typing import *
class VisionPosition(Parser):
def __init__(self):
self.x: Optional[int] = None
self.y: Optional[int] = None
self.z: Optional[int] = None
def _scrape(self, position_data: Dict[str, int]):
self.x = position_data.get("x")
self.y = position_data.get("y")
self.z = position_data.get("z")
class VisionLandmark(Parser):
def __init__(self):
self.type: str = ""
self.position: VisionPosition = VisionPosition()
def _scrape(self, landmark_data: Dict[str, any]):
self.type = landmark_data["type"]
self.position._scrape(landmark_data["position"])
class VisionVertice(Parser):
def __init__(self):
self.x: Optional[int] = None
self.y: Optional[int] = None
def _scrape(self, vertice_data: Dict[str, int]):
self.x = vertice_data.get("x")
self.y = vertice_data.get("y")
class VisionVertices(Parser):
def __init__(self):
self.vertices: List[VisionVertice] = []
def _scrape(self, vertices_data: List[Dict[str, int]]):
for vertice_data in vertices_data:
vertice = VisionVertice()
vertice._scrape(vertice_data)
self.vertices.append(vertice)
class VisionFaceAnnotation(Parser):
def __init__(self):
self.bounding_poly: VisionVertices = VisionVertices()
self.fd_bounding_poly: VisionVertices = VisionVertices()
self.landmarks: List[VisionLandmark] = []
self.roll_angle: int = 0
self.pan_angle: int = 0
self.tilt_angle: int = 0
self.detection_confidence: int = 0
self.landmarking_confidence: int = 0
self.joy_likelihood: str = ""
self.sorrow_likelihood: str = ""
self.anger_likelihood: str = ""
self.surprise_likelihood: str = ""
self.under_exposed_likelihood: str = ""
self.blurred_likelihood: str = ""
self.headwear_likelihood: str = ""
def _scrape(self, face_data: Dict[str, any]):
if (vertices_data := face_data.get("boundingPoly", {}).get("vertices")):
self.bounding_poly._scrape(vertices_data)
if (vertices_data := face_data.get("fdBoundingPoly", {}).get("vertices")):
self.fd_bounding_poly._scrape(vertices_data)
if (landmarks_data := face_data.get("landmarks")):
for landmark_data in landmarks_data:
landmark = VisionLandmark()
landmark._scrape(landmark_data)
self.landmarks.append(landmark)
self.roll_angle = face_data.get("rollAngle")
self.pan_angle = face_data.get("panAngle")
self.tilt_angle = face_data.get("tiltAngle")
self.detection_confidence = face_data.get("detectionConfidence")
self.landmarking_confidence = face_data.get("landmarkingConfidence")
self.joy_likelihood = face_data.get("joyLikelihood")
self.sorrow_likelihood = face_data.get("sorrowLikelihood")
self.anger_likelihood = face_data.get("angerLikelihood")
self.surprise_likelihood = face_data.get("surpriseLikelihood")
self.under_exposed_likelihood = face_data.get("underExposedLikelihood")
self.blurred_likelihood = face_data.get("blurredLikelihood")
self.headwear_likelihood = face_data.get("headwearLikelihood")
class VisionFaceDetection(Parser):
def __init__(self):
self.face_annotations: List[VisionFaceAnnotation] = []
def _scrape(self, vision_data: Dict[str, any]):
for face_data in vision_data["faceAnnotations"]:
face_annotation = VisionFaceAnnotation()
face_annotation._scrape(face_data)
self.face_annotations.append(face_annotation)
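A minimal sketch of feeding this parser (the dict mimics the shape of a Cloud Vision faceAnnotations response; the values are made up):
vision_data = {
    "faceAnnotations": [{
        "rollAngle": 1.2,
        "joyLikelihood": "VERY_LIKELY",
        "boundingPoly": {"vertices": [{"x": 10, "y": 20}]},
    }]
}
detection = VisionFaceDetection()
detection._scrape(vision_data)
print(detection.face_annotations[0].joy_likelihood)  # -> VERY_LIKELY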

0
ghunt/protos/__init__.py Normal file
View File

View File

View File

@@ -0,0 +1,16 @@
syntax = "proto3";
message GetPlayerProto {
message Form {
message Query {
string id = 1;
}
Query query = 303102213;
}
Form form = 2;
}
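To make the field numbering above concrete, a hedged sketch of building this request with the generated Python module (the import path mirrors the other playgatewaypa imports in this commit and is an assumption):
from ghunt.protos.playgatewaypa.get_player_pb2 import GetPlayerProto  # assumed path

req = GetPlayerProto()
req.form.query.id = "some_player_id"  # hypothetical player ID
payload = req.SerializeToString()  # wire-format body, ready to send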

View File

@@ -0,0 +1,191 @@
syntax = "proto3";
message GetPlayerResponseProto {
message field1_type {
message Results {
message Player {
message field1_type {
string name = 1;
}
field1_type field1 = 1;
message Avatar {
string url = 1;
}
Avatar avatar = 2;
}
Player player = 1;
message Section {
message Counter {
string number = 1;
}
Counter counter = 2;
message field3_type {
string section_name = 1;
}
field3_type field3 = 3;
message PlayedGames {
message field1_type {
message field1_type {
message field203130867_type {
string field1 = 1;
}
field203130867_type field203130867 = 203130867;
}
field1_type field1 = 1;
message Game {
message Icon {
string url = 1;
}
Icon icon = 3;
message field6_type {
message field1_type {
message Details {
message field1_type {
string app_id = 1;
string package_name = 2;
}
field1_type field1 = 1;
string title = 2;
string editor = 3;
}
Details details = 197793078;
}
field1_type field1 = 1;
}
field6_type field6 = 6;
}
repeated Game game = 208117482;
message field234686954_type {
message field1_type {
string field1 = 1;
message field2_type {
int64 field2 = 2;
}
field2_type field2 = 2;
}
field1_type field1 = 1;
}
field234686954_type field234686954 = 234686954;
}
field1_type field1 = 1;
}
PlayedGames played_games = 5;
message Achievements {
message field1_type {
message field3_type {
message field1_type {
message Achievement {
message field7_type {
message field1_type {
message Details {
string title = 1;
string description = 2;
int64 xp = 3;
fixed32 id = 4;
string icon_url = 5;
string game_id = 6;
int64 timestamp = 7;
int64 field8 = 8;
int64 is_secret = 12;
}
Details details = 306286962;
}
field1_type field1 = 1;
}
field7_type field7 = 7;
}
repeated Achievement achievement = 303956152;
}
field1_type field1 = 1;
message field2_type {
message field1_type {
message Page {
string player_id = 1;
string next_page_token = 3;
}
Page page = 303354831;
}
field1_type field1 = 1;
}
field2_type field2 = 2;
}
field3_type field3 = 3;
int64 field4 = 4;
}
field1_type field1 = 1;
}
Achievements achievements = 6;
}
repeated Section section = 2;
}
Results results = 303102213;
}
field1_type field1 = 1;
}

View File

@@ -0,0 +1,16 @@
syntax = "proto3";
message PlayerSearchProto {
message SearchForm {
message Query {
string text = 1;
}
Query query = 233436060;
}
SearchForm search_form = 2;
}

View File

@@ -0,0 +1,44 @@
syntax = "proto3";
message PlayerSearchResultsProto {
message field1_type {
message Results {
message field1_type {
message field1_type {
message Player {
message Avatar {
string url = 1;
}
Avatar avatar = 3;
message Account {
string id = 1;
string name = 3;
}
Account account = 6;
}
repeated Player player = 232977370;
}
field1_type field1 = 1;
}
field1_type field1 = 1;
}
Results results = 192500038;
}
field1_type field1 = 1;
}

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: definitions/get_player.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x64\x65\x66initions/get_player.proto\"\x80\x01\n\x0eGetPlayerProto\x12\"\n\x04\x66orm\x18\x02 \x01(\x0b\x32\x14.GetPlayerProto.Form\x1aJ\n\x04\x46orm\x12-\n\x05query\x18\x85\xf2\xc3\x90\x01 \x01(\x0b\x32\x1a.GetPlayerProto.Form.Query\x1a\x13\n\x05Query\x12\n\n\x02id\x18\x01 \x01(\tb\x06proto3')
_GETPLAYERPROTO = DESCRIPTOR.message_types_by_name['GetPlayerProto']
_GETPLAYERPROTO_FORM = _GETPLAYERPROTO.nested_types_by_name['Form']
_GETPLAYERPROTO_FORM_QUERY = _GETPLAYERPROTO_FORM.nested_types_by_name['Query']
GetPlayerProto = _reflection.GeneratedProtocolMessageType('GetPlayerProto', (_message.Message,), {
'Form' : _reflection.GeneratedProtocolMessageType('Form', (_message.Message,), {
'Query' : _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), {
'DESCRIPTOR' : _GETPLAYERPROTO_FORM_QUERY,
'__module__' : 'definitions.get_player_pb2'
# @@protoc_insertion_point(class_scope:GetPlayerProto.Form.Query)
})
,
'DESCRIPTOR' : _GETPLAYERPROTO_FORM,
'__module__' : 'definitions.get_player_pb2'
# @@protoc_insertion_point(class_scope:GetPlayerProto.Form)
})
,
'DESCRIPTOR' : _GETPLAYERPROTO,
'__module__' : 'definitions.get_player_pb2'
# @@protoc_insertion_point(class_scope:GetPlayerProto)
})
_sym_db.RegisterMessage(GetPlayerProto)
_sym_db.RegisterMessage(GetPlayerProto.Form)
_sym_db.RegisterMessage(GetPlayerProto.Form.Query)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_GETPLAYERPROTO._serialized_start=33
_GETPLAYERPROTO._serialized_end=161
_GETPLAYERPROTO_FORM._serialized_start=87
_GETPLAYERPROTO_FORM._serialized_end=161
_GETPLAYERPROTO_FORM_QUERY._serialized_start=142
_GETPLAYERPROTO_FORM_QUERY._serialized_end=161
# @@protoc_insertion_point(module_scope)

View File

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: definitions/search_player.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x64\x65\x66initions/search_player.proto\"\xa3\x01\n\x11PlayerSearchProto\x12\x32\n\x0bsearch_form\x18\x02 \x01(\x0b\x32\x1d.PlayerSearchProto.SearchForm\x1aZ\n\nSearchForm\x12\x35\n\x05query\x18\x9c\xe7\xa7o \x01(\x0b\x32#.PlayerSearchProto.SearchForm.Query\x1a\x15\n\x05Query\x12\x0c\n\x04text\x18\x01 \x01(\tb\x06proto3')
_PLAYERSEARCHPROTO = DESCRIPTOR.message_types_by_name['PlayerSearchProto']
_PLAYERSEARCHPROTO_SEARCHFORM = _PLAYERSEARCHPROTO.nested_types_by_name['SearchForm']
_PLAYERSEARCHPROTO_SEARCHFORM_QUERY = _PLAYERSEARCHPROTO_SEARCHFORM.nested_types_by_name['Query']
PlayerSearchProto = _reflection.GeneratedProtocolMessageType('PlayerSearchProto', (_message.Message,), {
'SearchForm' : _reflection.GeneratedProtocolMessageType('SearchForm', (_message.Message,), {
'Query' : _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), {
'DESCRIPTOR' : _PLAYERSEARCHPROTO_SEARCHFORM_QUERY,
'__module__' : 'definitions.search_player_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchProto.SearchForm.Query)
})
,
'DESCRIPTOR' : _PLAYERSEARCHPROTO_SEARCHFORM,
'__module__' : 'definitions.search_player_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchProto.SearchForm)
})
,
'DESCRIPTOR' : _PLAYERSEARCHPROTO,
'__module__' : 'definitions.search_player_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchProto)
})
_sym_db.RegisterMessage(PlayerSearchProto)
_sym_db.RegisterMessage(PlayerSearchProto.SearchForm)
_sym_db.RegisterMessage(PlayerSearchProto.SearchForm.Query)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_PLAYERSEARCHPROTO._serialized_start=36
_PLAYERSEARCHPROTO._serialized_end=199
_PLAYERSEARCHPROTO_SEARCHFORM._serialized_start=109
_PLAYERSEARCHPROTO_SEARCHFORM._serialized_end=199
_PLAYERSEARCHPROTO_SEARCHFORM_QUERY._serialized_start=178
_PLAYERSEARCHPROTO_SEARCHFORM_QUERY._serialized_end=199
# @@protoc_insertion_point(module_scope)

View File

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: definitions/search_player_results.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'definitions/search_player_results.proto\"\xe6\x05\n\x18PlayerSearchResultsProto\x12\x35\n\x06\x66ield1\x18\x01 \x01(\x0b\x32%.PlayerSearchResultsProto.field1_type\x1a\x92\x05\n\x0b\x66ield1_type\x12\x41\n\x07results\x18\xc6\xa2\xe5[ \x01(\x0b\x32-.PlayerSearchResultsProto.field1_type.Results\x1a\xbf\x04\n\x07Results\x12I\n\x06\x66ield1\x18\x01 \x01(\x0b\x32\x39.PlayerSearchResultsProto.field1_type.Results.field1_type\x1a\xe8\x03\n\x0b\x66ield1_type\x12U\n\x06\x66ield1\x18\x01 \x01(\x0b\x32\x45.PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type\x1a\x81\x03\n\x0b\x66ield1_type\x12_\n\x06player\x18\xda\xe7\x8bo \x03(\x0b\x32L.PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player\x1a\x90\x02\n\x06Player\x12\x63\n\x06\x61vatar\x18\x03 \x01(\x0b\x32S.PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player.Avatar\x12\x65\n\x07\x61\x63\x63ount\x18\x06 \x01(\x0b\x32T.PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player.Account\x1a\x15\n\x06\x41vatar\x12\x0b\n\x03url\x18\x01 \x01(\t\x1a#\n\x07\x41\x63\x63ount\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\tb\x06proto3')
_PLAYERSEARCHRESULTSPROTO = DESCRIPTOR.message_types_by_name['PlayerSearchResultsProto']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE = _PLAYERSEARCHRESULTSPROTO.nested_types_by_name['field1_type']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS = _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE.nested_types_by_name['Results']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE = _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS.nested_types_by_name['field1_type']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE = _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE.nested_types_by_name['field1_type']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER = _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE.nested_types_by_name['Player']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_AVATAR = _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER.nested_types_by_name['Avatar']
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_ACCOUNT = _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER.nested_types_by_name['Account']
PlayerSearchResultsProto = _reflection.GeneratedProtocolMessageType('PlayerSearchResultsProto', (_message.Message,), {
'field1_type' : _reflection.GeneratedProtocolMessageType('field1_type', (_message.Message,), {
'Results' : _reflection.GeneratedProtocolMessageType('Results', (_message.Message,), {
'field1_type' : _reflection.GeneratedProtocolMessageType('field1_type', (_message.Message,), {
'field1_type' : _reflection.GeneratedProtocolMessageType('field1_type', (_message.Message,), {
'Player' : _reflection.GeneratedProtocolMessageType('Player', (_message.Message,), {
'Avatar' : _reflection.GeneratedProtocolMessageType('Avatar', (_message.Message,), {
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_AVATAR,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player.Avatar)
})
,
'Account' : _reflection.GeneratedProtocolMessageType('Account', (_message.Message,), {
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_ACCOUNT,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player.Account)
})
,
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player)
})
,
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type)
})
,
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type.Results.field1_type)
})
,
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type.Results)
})
,
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto.field1_type)
})
,
'DESCRIPTOR' : _PLAYERSEARCHRESULTSPROTO,
'__module__' : 'definitions.search_player_results_pb2'
# @@protoc_insertion_point(class_scope:PlayerSearchResultsProto)
})
_sym_db.RegisterMessage(PlayerSearchResultsProto)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type.Results)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type.Results.field1_type)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player.Avatar)
_sym_db.RegisterMessage(PlayerSearchResultsProto.field1_type.Results.field1_type.field1_type.Player.Account)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_PLAYERSEARCHRESULTSPROTO._serialized_start=44
_PLAYERSEARCHRESULTSPROTO._serialized_end=786
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE._serialized_start=128
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE._serialized_end=786
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS._serialized_start=211
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS._serialized_end=786
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE._serialized_start=298
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE._serialized_end=786
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE._serialized_start=401
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE._serialized_end=786
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER._serialized_start=514
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER._serialized_end=786
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_AVATAR._serialized_start=728
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_AVATAR._serialized_end=749
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_ACCOUNT._serialized_start=751
_PLAYERSEARCHRESULTSPROTO_FIELD1_TYPE_RESULTS_FIELD1_TYPE_FIELD1_TYPE_PLAYER_ACCOUNT._serialized_end=786
# @@protoc_insertion_point(module_scope)

View File

@@ -1,18 +0,0 @@
from colorama import init, Fore, Back, Style
def banner():
init()
banner = """
""" + Fore.RED + """ .d8888b. """ + Fore.BLUE + """888 888""" + Fore.RED + """ 888
""" + Fore.RED + """d88P Y88b """ + Fore.BLUE + """888 888""" + Fore.RED + """ 888
""" + Fore.YELLOW + """888 """ + Fore.RED + """888 """ + Fore.BLUE + """888 888""" + Fore.RED + """ 888
""" + Fore.YELLOW + """888 """ + Fore.BLUE + """8888888888""" + Fore.GREEN + """ 888 888""" + Fore.YELLOW + """ 88888b. """ + Fore.RED + """ 888888
""" + Fore.YELLOW + """888 """ + Fore.BLUE + """88888 """ + Fore.BLUE + """888 888""" + Fore.GREEN + """ 888 888""" + Fore.YELLOW + """ 888 "88b""" + Fore.RED + """ 888
""" + Fore.YELLOW + """888 """ + Fore.BLUE + """888 """ + Fore.BLUE + """888 888""" + Fore.GREEN + """ 888 888""" + Fore.YELLOW + """ 888 888""" + Fore.RED + """ 888
""" + Fore.GREEN + """Y88b d88P """ + Fore.BLUE + """888 888""" + Fore.GREEN + """ Y88b 888""" + Fore.YELLOW + """ 888 888""" + Fore.RED + """ Y88b.
""" + Fore.GREEN + """ "Y8888P88 """ + Fore.BLUE + """888 888""" + Fore.GREEN + """ "Y88888""" + Fore.YELLOW + """ 888 888""" + Fore.RED + """ "Y888
""" + Fore.RESET
print(banner)

View File

@@ -1,91 +0,0 @@
import httpx
from dateutil.relativedelta import relativedelta
from beautifultable import BeautifulTable
from termcolor import colored
import time
import json
from datetime import datetime, timezone
from urllib.parse import urlencode
# assembling the json request url endpoint
def assemble_api_req(calendarId, singleEvents, maxAttendees, maxResults, sanitizeHtml, timeMin, API_key, email):
base_url = f"https://clients6.google.com/calendar/v3/calendars/{email}/events?"
params = {
"calendarId": calendarId,
"singleEvents": singleEvents,
"maxAttendees": maxAttendees,
"maxResults": maxResults,
"sanitizeHtml": sanitizeHtml,
"timeMin": timeMin,
"key": API_key
}
base_url += urlencode(params, doseq=True)
return base_url
# from iso to datetime object in utc
def get_datetime_utc(date_str):
date = datetime.fromisoformat(date_str)
margin = date.utcoffset()
return date.replace(tzinfo=timezone.utc) - margin
# main method of calendar.py
def fetch(email, client, config):
if not config.calendar_cookies:
cookies = {"CONSENT": config.default_consent_cookie}
client.cookies = cookies
url_endpoint = f"https://calendar.google.com/calendar/u/0/embed?src={email}"
print("\nGoogle Calendar : " + url_endpoint)
req = client.get(url_endpoint + "&hl=en")
source = req.text
try:
# parsing parameters from source code
calendarId = source.split('title\":\"')[1].split('\"')[0]
singleEvents = "true"
maxAttendees = 1
maxResults = 250
sanitizeHtml = "true"
timeMin = datetime.strptime(source.split('preloadStart\":\"')[1].split('\"')[0], '%Y%m%d').replace(tzinfo=timezone.utc).isoformat()
API_key = source.split('developerKey\":\"')[1].split('\"')[0]
except IndexError:
return False
json_calendar_endpoint = assemble_api_req(calendarId, singleEvents, maxAttendees, maxResults, sanitizeHtml, timeMin, API_key, email)
req = client.get(json_calendar_endpoint)
data = json.loads(req.text)
events = []
try:
for item in data["items"]:
title = item["summary"]
start = get_datetime_utc(item["start"]["dateTime"])
end = get_datetime_utc(item["end"]["dateTime"])
events.append({"title": title, "start": start, "end": end})
except KeyError:
return False
return {"status": "available", "events": events}
def out(events):
limit = 5
now = datetime.utcnow().replace(tzinfo=timezone.utc)
after = [date for date in events if date["start"] >= now][:limit]
before = [date for date in events if date["start"] <= now][:limit]
print(f"\n=> The {'next' if after else 'last'} {len(after) if after else len(before)} event{'s' if (len(after) > 1) or (not after and len(before) > 1) else ''} :")
target = after if after else before
table = BeautifulTable()
table.set_style(BeautifulTable.STYLE_GRID)
table.columns.header = [colored(x, attrs=['bold']) for x in ["Name", "Datetime (UTC)", "Duration"]]
for event in target:
title = event["title"]
duration = relativedelta(event["end"], event["start"])
if duration.days or duration.hours or duration.minutes:
duration = (f"{(str(duration.days) + ' day' + ('s' if duration.days > 1 else '')) if duration.days else ''} "
f"{(str(duration.hours) + ' hour' + ('s' if duration.hours > 1 else '')) if duration.hours else ''} "
f"{(str(duration.minutes) + ' minute' + ('s' if duration.minutes > 1 else '')) if duration.minutes else ''}").strip()
else:
duration = "?"
date = event["start"].strftime("%Y/%m/%d %H:%M:%S")
table.rows.append([title, date, duration])
print(table)

View File

@@ -1,255 +0,0 @@
from datetime import datetime
from dateutil.relativedelta import relativedelta
from geopy import distance
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from seleniumwire import webdriver
from lib.utils import *
def scrape(gaiaID, client, cookies, config, headers, regex_rev_by_id, is_headless):
def get_datetime(date_published):
if date_published.split()[0] == "a":
nb = 1
else:
nb = int(date_published.split()[0])
if "minute" in date_published:
delta = relativedelta(minutes=nb)
elif "hour" in date_published:
delta = relativedelta(hours=nb)
elif "day" in date_published:
delta = relativedelta(days=nb)
elif "week" in date_published:
delta = relativedelta(weeks=nb)
elif "month" in date_published:
delta = relativedelta(months=nb)
elif "year" in date_published:
delta = relativedelta(years=nb)
else:
delta = relativedelta()
return (datetime.today() - delta).replace(microsecond=0, second=0)
tmprinter = TMPrinter()
base_url = f"https://www.google.com/maps/contrib/{gaiaID}/reviews?hl=en"
print(f"\nGoogle Maps : {base_url.replace('?hl=en', '')}")
tmprinter.out("Initial request...")
req = client.get(base_url)
source = req.text
data = source.split(';window.APP_INITIALIZATION_STATE=')[1].split(';window.APP_FLAGS')[0].replace("\\", "")
if "/maps/reviews/data" not in data:
tmprinter.out("")
print("[-] No reviews")
return False
chrome_options = get_chrome_options_args(is_headless)
options = {
'connection_timeout': None # Never timeout, otherwise it floods errors
}
tmprinter.out("Starting browser...")
driver_path = get_driverpath()
driver = webdriver.Chrome(executable_path=driver_path, seleniumwire_options=options, options=chrome_options)
driver.header_overrides = headers
wait = WebDriverWait(driver, 15)
tmprinter.out("Setting cookies...")
driver.get("https://www.google.com/robots.txt")
if not config.gmaps_cookies:
cookies = {"CONSENT": config.default_consent_cookie}
for k, v in cookies.items():
driver.add_cookie({'name': k, 'value': v})
tmprinter.out("Fetching reviews page...")
reviews = []
try:
driver.get(base_url)
#wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.section-scrollbox')))
#scrollbox = driver.find_element(By.CSS_SELECTOR, 'div.section-scrollbox')
tab_info = None
try:
tab_info = driver.find_element(by=By.XPATH, value="//span[contains(@aria-label, 'review') and contains(@aria-label, 'rating')]")
except NoSuchElementException:
pass
if tab_info and tab_info.text:
scroll_max = sum([int(x) for x in tab_info.text.split() if x.isdigit()])
else:
return False
tmprinter.clear()
print(f"[+] {scroll_max} reviews found !")
scrollbox = tab_info.find_element(By.XPATH, "../../../..")
timeout = scroll_max * 2.5
timeout_start = time()
reviews_elements = driver.find_elements(by=By.XPATH, value="//div[@data-review-id][@aria-label]")
tmprinter.out(f"Fetching reviews... ({len(reviews_elements)}/{scroll_max})")
while len(reviews_elements) < scroll_max:
driver.execute_script("arguments[0].scrollTop = arguments[0].scrollHeight", scrollbox)
reviews_elements = driver.find_elements(by=By.XPATH, value='//div[@data-review-id][@aria-label]')
tmprinter.out(f"Fetching reviews... ({len(reviews_elements)}/{scroll_max})")
if time() > timeout_start + timeout:
tmprinter.out(f"Timeout while fetching reviews !")
break
tmprinter.out("Fetching internal requests history...")
requests = [r.url for r in driver.requests if "locationhistory" in r.url]
tmprinter.out(f"Fetching internal requests... (0/{len(requests)})")
for nb, load in enumerate(requests):
req = client.get(load)
data += req.text.replace('\n', '')
tmprinter.out(f"Fetching internal requests... ({nb + 1}/{len(requests)})")
tmprinter.out(f"Fetching reviews location... (0/{len(reviews_elements)})")
rating = 0
for nb, review in enumerate(reviews_elements):
id = review.get_attribute("data-review-id")
location = re.compile(regex_rev_by_id.format(id)).findall(data)[0]
try:
stars = review.find_element(By.CSS_SELECTOR, 'span[aria-label$="stars "]')
except Exception:
stars = review.find_element(By.CSS_SELECTOR, 'span[aria-label$="star "]')
rating += int(stars.get_attribute("aria-label").strip().split()[0])
date = get_datetime(stars.find_element(By.XPATH, "following-sibling::span").text)
reviews.append({"location": location, "date": date})
tmprinter.out(f"Fetching reviews location... ({nb + 1}/{len(reviews_elements)})")
rating_avg = rating / len(reviews)
tmprinter.clear()
print(f"[+] Average rating : {int(rating_avg) if int(rating_avg) / round(rating_avg, 1) == 1 else round(rating_avg, 1)}/5 stars !")
# 4.9 => 4.9, 5.0 => 5, we don't show the 0
except TimeoutException as e:
print("Error fetching reviews, it is likely that Google has changed the layout of the reviews page.")
return reviews
def avg_location(locs):
latitude = []
longitude = []
for loc in locs:
latitude.append(float(loc[0]))
longitude.append(float(loc[1]))
latitude = sum(latitude) / len(latitude)
longitude = sum(longitude) / len(longitude)
return latitude, longitude
def translate_confidence(percents):
if percents >= 100:
return "Extremely high"
elif percents >= 80:
return "Very high"
elif percents >= 60:
return "Little high"
elif percents >= 40:
return "Okay"
elif percents >= 20:
return "Low"
elif percents >= 10:
return "Very low"
else:
return "Extremely low"
def get_confidence(geolocator, data, gmaps_radius):
tmprinter = TMPrinter()
radius = gmaps_radius
locations = {}
tmprinter.out(f"Calculation of the distance of each review...")
for nb, review in enumerate(data):
hash = hashlib.md5(str(review).encode()).hexdigest()
if hash not in locations:
locations[hash] = {"dates": [], "locations": [], "range": None, "score": 0}
location = review["location"]
for review2 in data:
location2 = review2["location"]
dis = distance.distance(location, location2).km
if dis <= radius:
locations[hash]["dates"].append(review2["date"])
locations[hash]["locations"].append(review2["location"])
maxdate = max(locations[hash]["dates"])
mindate = min(locations[hash]["dates"])
locations[hash]["range"] = maxdate - mindate
tmprinter.out(f"Calculation of the distance of each review ({nb}/{len(data)})...")
tmprinter.out("")
locations = {k: v for k, v in
sorted(locations.items(), key=lambda k: len(k[1]["locations"]), reverse=True)} # We sort it
tmprinter.out("Identification of redundant areas...")
to_del = []
for hash in locations:
if hash in to_del:
continue
for hash2 in locations:
if hash2 in to_del or hash == hash2:
continue
if all([loc in locations[hash]["locations"] for loc in locations[hash2]["locations"]]):
to_del.append(hash2)
for hash in to_del:
del locations[hash]
tmprinter.out("Calculating confidence...")
maxrange = max([locations[hash]["range"] for hash in locations])
maxlen = max([len(locations[hash]["locations"]) for hash in locations])
minreq = 3
mingroups = 3
score_steps = 4
for hash, loc in locations.items():
if len(loc["locations"]) == maxlen:
locations[hash]["score"] += score_steps * 4
if loc["range"] == maxrange:
locations[hash]["score"] += score_steps * 3
if len(locations) >= mingroups:
others = sum([len(locations[h]["locations"]) for h in locations if h != hash])
if len(loc["locations"]) > others:
locations[hash]["score"] += score_steps * 2
if len(loc["locations"]) >= minreq:
locations[hash]["score"] += score_steps
# for hash,loc in locations.items():
# print(f"{hash} => {len(loc['locations'])} ({int(loc['score'])/40*100})")
panels = sorted(set([loc["score"] for loc in locations.values()]), reverse=True)
maxscore = sum([p * score_steps for p in range(1, score_steps + 1)])
for panel in panels:
locs = [loc for loc in locations.values() if loc["score"] == panel]
if len(locs[0]["locations"]) == 1:
panel /= 2
if len(data) < 4:
panel /= 2
confidence = translate_confidence(panel / maxscore * 100)
for nb, loc in enumerate(locs):
avg = avg_location(loc["locations"])
#import pdb; pdb.set_trace()
while True:
try:
location = geolocator.reverse(f"{avg[0]}, {avg[1]}", timeout=10).raw["address"]
break
except:
pass
location = sanitize_location(location)
locs[nb]["avg"] = location
del locs[nb]["locations"]
del locs[nb]["score"]
del locs[nb]["range"]
del locs[nb]["dates"]
tmprinter.out("")
return confidence, locs

View File

@@ -1,188 +0,0 @@
from datetime import datetime
from PIL import ExifTags
from PIL.ExifTags import TAGS, GPSTAGS
from geopy.geocoders import Nominatim
from lib.utils import *
class ExifEater():
def __init__(self):
self.devices = {}
self.softwares = {}
self.locations = {}
self.geolocator = Nominatim(user_agent="nominatim")
def get_GPS(self, img):
location = ""
geoaxis = {}
geotags = {}
try:
exif = img._getexif()
for (idx, tag) in TAGS.items():
if tag == 'GPSInfo':
if idx in exif:
for (key, val) in GPSTAGS.items():
if key in exif[idx]:
geotags[val] = exif[idx][key]
for axis in ["Latitude", "Longitude"]:
dms = geotags[f'GPS{axis}']
ref = geotags[f'GPS{axis}Ref']
degrees = dms[0][0] / dms[0][1]
minutes = dms[1][0] / dms[1][1] / 60.0
seconds = dms[2][0] / dms[2][1] / 3600.0
if ref in ['S', 'W']:
degrees = -degrees
minutes = -minutes
seconds = -seconds
geoaxis[axis] = round(degrees + minutes + seconds, 5)
location = \
self.geolocator.reverse("{}, {}".format(geoaxis["Latitude"], geoaxis["Longitude"])).raw[
"address"]
except Exception:
return ""
else:
if location:
location = sanitize_location(location)
if not location:
return ""
return f'{location["town"]}, {location["country"]}'
else:
return ""
def feed(self, img):
try:
img._getexif()
except:
try:
img._getexif = img.getexif
except:
img._getexif = lambda d={}:d
if img._getexif():
location = self.get_GPS(img)
exif = {ExifTags.TAGS[k]: v for k, v in img._getexif().items() if k in ExifTags.TAGS}
interesting_fields = ["Make", "Model", "DateTime", "Software"]
metadata = {k: v for k, v in exif.items() if k in interesting_fields}
try:
date = datetime.strptime(metadata["DateTime"], '%Y:%m:%d %H:%M:%S')
is_date_valid = "Valid"
except Exception:
date = None
is_date_valid = "Invalid"
if location:
if location not in self.locations:
self.locations[location] = {"Valid": [], "Invalid": []}
self.locations[location][is_date_valid].append(date)
if "Make" in metadata and "Model" in metadata:
if metadata["Model"] not in self.devices:
self.devices[metadata["Model"]] = {"Make": metadata["Make"],
"History": {"Valid": [], "Invalid": []}, "Firmwares": {}}
self.devices[metadata["Model"]]["History"][is_date_valid].append(date)
if "Software" in metadata:
if metadata["Software"] not in self.devices[metadata["Model"]]["Firmwares"]:
self.devices[metadata["Model"]]["Firmwares"][metadata["Software"]] = {"Valid": [],
"Invalid": []}
self.devices[metadata["Model"]]["Firmwares"][metadata["Software"]][is_date_valid].append(date)
elif "Software" in metadata:
if metadata["Software"] not in self.softwares:
self.softwares[metadata["Software"]] = {"Valid": [], "Invalid": []}
self.softwares[metadata["Software"]][is_date_valid].append(date)
def give_back(self):
return self.locations, self.devices
def output(self):
bkn = '\n' # to use in f-strings
def picx(n):
return "s" if n > 1 else ""
def print_dates(dates_list):
dates = {}
dates["max"] = max(dates_list).strftime("%Y/%m/%d")
dates["min"] = min(dates_list).strftime("%Y/%m/%d")
if dates["max"] == dates["min"]:
return dates["max"]
else:
return f'{dates["min"]} -> {dates["max"]}'
# pprint((self.devices, self.softwares, self.locations))
devices = self.devices
if devices:
print(f"[+] {len(devices)} device{picx(len(devices))} found !")
for model, data in devices.items():
make = data["Make"]
if model.lower().startswith(make.lower()):
model = model[len(make):].strip()
n = len(data["History"]["Valid"] + data["History"]["Invalid"])
for validity, dateslist in data["History"].items():
if dateslist and (
(validity == "Valid") or (validity == "Invalid" and not data["History"]["Valid"])):
if validity == "Valid":
dates = print_dates(data["History"]["Valid"])
elif validity == "Valid" and data["History"]["Invalid"]:
dates = print_dates(data["History"]["Valid"])
dates += " (+ ?)"
elif validity == "Invalid" and not data["History"]["Valid"]:
dates = "?"
print(
f"{bkn if data['Firmwares'] else ''}- {make.capitalize()} {model} ({n} pic{picx(n)}) [{dates}]")
if data["Firmwares"]:
n = len(data['Firmwares'])
print(f"-> {n} Firmware{picx(n)} found !")
for firmware, firmdata in data["Firmwares"].items():
for validity2, dateslist2 in firmdata.items():
if dateslist2 and ((validity2 == "Valid") or (
validity2 == "Invalid" and not firmdata["Valid"])):
if validity2 == "Valid":
dates2 = print_dates(firmdata["Valid"])
elif validity2 == "Valid" and firmdata["Invalid"]:
dates2 = print_dates(firmdata["Valid"])
dates2 += " (+ ?)"
elif validity2 == "Invalid" and not firmdata["Valid"]:
dates2 = "?"
print(f"--> {firmware} [{dates2}]")
locations = self.locations
if locations:
print(f"\n[+] {len(locations)} location{picx(len(locations))} found !")
for location, data in locations.items():
n = len(data["Valid"] + data["Invalid"])
for validity, dateslist in data.items():
if dateslist and ((validity == "Valid") or (validity == "Invalid" and not data["Valid"])):
if validity == "Valid":
dates = print_dates(data["Valid"])
elif validity == "Valid" and data["Invalid"]:
dates = print_dates(data["Valid"])
dates += " (+ ?)"
elif validity == "Invalid" and not data["Valid"]:
dates = "?"
print(f"- {location} ({n} pic{picx(n)}) [{dates}]")
softwares = self.softwares
if softwares:
print(f"\n[+] {len(softwares)} software{picx(len(softwares))} found !")
for software, data in softwares.items():
n = len(data["Valid"] + data["Invalid"])
for validity, dateslist in data.items():
if dateslist and ((validity == "Valid") or (validity == "Invalid" and not data["Valid"])):
if validity == "Valid":
dates = print_dates(data["Valid"])
elif validity == "Valid" and data["Invalid"]:
dates = print_dates(data["Valid"])
dates += " (+ ?)"
elif validity == "Invalid" and not data["Valid"]:
dates = "?"
print(f"- {software} ({n} pic{picx(n)}) [{dates}]")
if not devices and not locations and not softwares:
print("=> Nothing found")


@@ -1,34 +0,0 @@
from pkg_resources import parse_requirements, parse_version, working_set
def print_help_and_exit():
print("- Windows : py -m pip install --upgrade -r requirements.txt")
print("- Unix : python3 -m pip install --upgrade -r requirements.txt")
exit()
def check_versions(installed_version, op, version):
if (op == ">" and parse_version(installed_version) > parse_version(version)) \
or (op == "<" and parse_version(installed_version) < parse_version(version)) \
or (op == "==" and parse_version(installed_version) == parse_version(version)) \
or (op == ">=" and parse_version(installed_version) >= parse_version(version)) \
or (op == "<=" and parse_version(installed_version) <= parse_version(version)) :
return True
return False
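# Illustrative examples (not from the original file): check_versions compares
# with packaging semantics rather than as plain strings, e.g.:
#   check_versions("0.23.1", ">=", "0.20.0")  -> True
#   check_versions("0.9", "<", "0.10")        -> True ("0.10" is the newer release)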
def check():
with open('requirements.txt', "r") as requirements_raw:
requirements = [{"specs": x.specs, "key": x.key} for x in parse_requirements(requirements_raw)]
installed_mods = {mod.key:mod.version for mod in working_set}
for req in requirements:
if req["key"] not in installed_mods:
print(f"[-] [modwall] I can't find the library {req['key']}, did you correctly installed the libraries specified in requirements.txt ? 😤\n")
print_help_and_exit()
else:
if req["specs"] and (specs := req["specs"][0]):
op, version = specs
if not check_versions(installed_mods[req["key"]], op, version):
print(f"[-] [modwall] The library {req['key']} version is {installed_mods[req['key']]} but it requires {op} {version}\n")
print("Please upgrade your libraries specified in the requirements.txt file. 😇")
print_help_and_exit()


@@ -1,46 +0,0 @@
from platform import system, uname
class Os:
"""
returns class with properties:
.cygwin Cygwin detected
.wsl Windows Subsystem for Linux (WSL) detected
.mac Mac OS detected
.linux Linux detected
.bsd BSD detected
"""
def __init__(self):
syst = system().lower()
# initialize
self.cygwin = False
self.wsl = False
self.mac = False
self.linux = False
self.windows = False
self.bsd = False
if 'cygwin' in syst:
self.cygwin = True
self.os = 'cygwin'
elif 'darwin' in syst:
self.mac = True
self.os = 'mac'
elif 'linux' in syst:
self.linux = True
self.os = 'linux'
if 'Microsoft' in uname().release:
self.wsl = True
self.linux = False
self.os = 'wsl'
elif 'windows' in syst:
self.windows = True
self.os = 'windows'
elif 'bsd' in syst:
self.bsd = True
self.os = 'bsd'
def __str__(self):
return self.os
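# Usage sketch (illustrative): instantiate once and query the boolean
# attributes, or rely on __str__ for the platform name:
#   current = Os()
#   if current.wsl:
#       print("Running under WSL")
#   print(current)  # -> "cygwin", "mac", "linux", "wsl", "windows" or "bsd"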


@@ -1,151 +0,0 @@
import re
from io import BytesIO
from PIL import Image
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from seleniumwire import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from lib.metadata import ExifEater
from lib.utils import *
class element_has_substring_or_substring(object):
def __init__(self, locator, substring1, substring2):
self.locator = locator
self.substring1 = substring1
self.substring2 = substring2
def __call__(self, driver):
element = driver.find_element(*self.locator) # Finding the referenced element
if self.substring1 in element.text:
return self.substring1
elif self.substring2 in element.text:
return self.substring2
else:
return False
def get_source(gaiaID, client, cookies, headers, is_headless):
baseurl = f"https://get.google.com/albumarchive/{gaiaID}/albums/profile-photos?hl=en"
req = client.get(baseurl)
if req.status_code != 200:
return False
tmprinter = TMPrinter()
chrome_options = get_chrome_options_args(is_headless)
options = {
'connection_timeout': None # Never timeout, otherwise it floods errors
}
tmprinter.out("Starting browser...")
driverpath = get_driverpath()
driver = webdriver.Chrome(executable_path=driverpath, seleniumwire_options=options, options=chrome_options)
driver.header_overrides = headers
wait = WebDriverWait(driver, 30)
tmprinter.out("Setting cookies...")
driver.get("https://get.google.com/robots.txt")
for k, v in cookies.items():
driver.add_cookie({'name': k, 'value': v})
tmprinter.out('Fetching Google Photos "Profile photos" album...')
driver.get(baseurl)
tmprinter.out('Fetching the Google Photos albums overview...')
buttons = driver.find_elements(By.XPATH, "//button")
for button in buttons:
text = button.get_attribute('jsaction')
if text and 'touchcancel' in text:
button.click()
break
else:
tmprinter.out("")
print("Can't get the back button..")
driver.close()
return False
wait.until(EC.text_to_be_present_in_element((By.XPATH, "//body"), "Album Archive"))
tmprinter.out("Got the albums overview !")
no_photos_trigger = "reached the end"
photos_trigger = " item"
body = driver.find_element(By.XPATH, "//body").text
if no_photos_trigger in body:
stats = "notfound"
elif photos_trigger in body:
stats = "found"
else:
try:
result = wait.until(element_has_substring_or_substring((By.XPATH, "//body"), no_photos_trigger, photos_trigger))
except Exception:
tmprinter.out("[-] Timeout while fetching photos.")
return False
else:
if result == no_photos_trigger:
stats = "notfound"
elif result == photos_trigger:
stats = "found"
else:
return False
tmprinter.out("")
source = driver.page_source
driver.close()
return {"stats": stats, "source": source}
def gpics(gaiaID, client, cookies, headers, regex_albums, regex_photos, headless=True):
baseurl = "https://get.google.com/albumarchive/"
print(f"\nGoogle Photos : {baseurl + gaiaID + '/albums/profile-photos'}")
out = get_source(gaiaID, client, cookies, headers, headless)
if not out:
print("=> Couldn't fetch the public photos.")
return False
if out["stats"] == "notfound":
print("=> No album")
return False
# open('debug.html', 'w').write(repr(out["source"]))
results = re.compile(regex_albums).findall(out["source"])
list_albums_length = len(results)
if results:
exifeater = ExifEater()
pics = []
for album in results:
album_name = album[1]
album_link = baseurl + gaiaID + "/album/" + album[0]
album_length = int(album[2])
if album_length >= 1:
try:
req = client.get(album_link)
source = req.text.replace('\n', '')
results_pics = re.compile(regex_photos).findall(source)
for pic in results_pics:
pic_name = pic[1]
pic_link = pic[0]
pics.append(pic_link)
                except Exception:
                    pass  # skip albums we fail to fetch or parse
print(f"=> {list_albums_length} albums{', ' + str(len(pics)) + ' photos' if list_albums_length else ''}")
for pic in pics:
try:
req = client.get(pic)
img = Image.open(BytesIO(req.content))
exifeater.feed(img)
            except Exception:
                pass  # skip pictures we fail to download or open
print("\nSearching metadata...")
exifeater.output()
else:
print("=> No album")


@@ -1,42 +0,0 @@
import json
import httpx
from pprint import pprint
from time import sleep
def search(query, data_path, gdocs_public_doc, size=1000):
cookies = ""
token = ""
with open(data_path, 'r') as f:
out = json.loads(f.read())
token = out["keys"]["gdoc"]
cookies = out["cookies"]
data = {"request": '["documentsuggest.search.search_request","{}",[{}],null,1]'.format(query, size)}
retries = 10
    for retry in reversed(range(retries)):
req = httpx.post('https://docs.google.com/document/d/{}/explore/search?token={}'.format(gdocs_public_doc, token),
cookies=cookies, data=data)
#print(req.text)
if req.status_code == 200:
break
        if req.status_code == 500:
            if retry == 0:
                exit(f"[-] Error (GDocs): request gives {req.status_code}, wait a minute and retry !")
            print("[-] GDocs request gives a 500 status code, retrying in 5 seconds...")
            sleep(5)  # honor the announced delay before retrying
            continue
output = json.loads(req.text.replace(")]}'", ""))
if isinstance(output[0][1], str) and output[0][1].lower() == "xsrf":
exit(f"\n[-] Error : XSRF detected.\nIt means your cookies have expired, please generate new ones.")
results = []
for result in output[0][1]:
link = result[0][0]
title = result[0][1]
desc = result[0][2]
results.append({"title": title, "desc": desc, "link": link})
return results


@@ -1,252 +0,0 @@
import imagehash
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from lib.os_detect import Os
from pathlib import Path
import shutil
from os.path import isfile
import json
import re
from pprint import pprint
from time import time
import hashlib
class TMPrinter():
def __init__(self):
self.max_len = 0
def out(self, text):
if len(text) > self.max_len:
self.max_len = len(text)
else:
text += (" " * (self.max_len - len(text)))
print(text, end='\r')
def clear(self):
print(" " * self.max_len, end="\r")
def within_docker():
return Path('/.dockerenv').is_file()
class Picture:
def __init__(self, url, is_default=False):
self.url = url
self.is_default = is_default
class Contact:
def __init__(self, val, is_primary=True):
self.value = val
self.is_secondary = not is_primary
def is_normalized(self, val):
return val.replace('.', '').lower() == self.value.replace('.', '').lower()
def __str__(self):
printable_value = self.value
if self.is_secondary:
printable_value += ' (secondary)'
return printable_value
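# Illustrative note: is_normalized() treats two addresses as the same when
# they only differ by dots or case, mirroring Gmail's normalization, e.g.:
#   Contact("John.Doe@gmail.com").is_normalized("johndoe@gmail.com")  # True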
def update_emails(emails, data):
"""
Typically canonical user email
May not be present in the list method response
"""
if not "email" in data:
return emails
for e in data["email"]:
is_primary = e.get("signupEmailMetadata", {}).get("primary")
email = Contact(e["value"], is_primary)
if email.value in emails:
if is_primary:
emails[email.value].is_secondary = False
else:
emails[email.value] = email
return emails
def is_email_google_account(httpx_client, auth, cookies, email, key):
host = "https://people-pa.clients6.google.com"
url = "/v2/people/lookup?key={}".format(key)
body = """id={}&type=EMAIL&matchType=EXACT&coreIdParams.useRealtimeNotificationExpandedAcls=true&requestMask.includeField.paths=person.email&requestMask.includeField.paths=person.gender&requestMask.includeField.paths=person.in_app_reachability&requestMask.includeField.paths=person.metadata&requestMask.includeField.paths=person.name&requestMask.includeField.paths=person.phone&requestMask.includeField.paths=person.photo&requestMask.includeField.paths=person.read_only_profile_info&requestMask.includeContainer=AFFINITY&requestMask.includeContainer=PROFILE&requestMask.includeContainer=DOMAIN_PROFILE&requestMask.includeContainer=ACCOUNT&requestMask.includeContainer=EXTERNAL_ACCOUNT&requestMask.includeContainer=CIRCLE&requestMask.includeContainer=DOMAIN_CONTACT&requestMask.includeContainer=DEVICE_CONTACT&requestMask.includeContainer=GOOGLE_GROUP&requestMask.includeContainer=CONTACT"""
headers = {
"X-HTTP-Method-Override": "GET",
"Authorization": auth,
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "https://chat.google.com"
}
req = httpx_client.post(host + url, data=body.format(email), headers=headers, cookies=cookies)
data = json.loads(req.text)
#pprint(data)
if "error" in data and "Request had invalid authentication credentials" in data["error"]["message"]:
exit("[-] Cookies/Tokens seems expired, please verify them.")
elif "error" in data:
print("[-] Error :")
pprint(data)
exit()
elif not "matches" in data:
exit("[-] This email address does not belong to a Google Account.")
return data
def get_account_data(httpx_client, gaiaID, internal_auth, internal_token, config):
# Bypass method
req_headers = {
"Origin": "https://drive.google.com",
"authorization": internal_auth,
"Host": "people-pa.clients6.google.com"
}
headers = {**config.headers, **req_headers}
url = f"https://people-pa.clients6.google.com/v2/people?person_id={gaiaID}&request_mask.include_container=PROFILE&request_mask.include_container=DOMAIN_PROFILE&request_mask.include_field.paths=person.metadata.best_display_name&request_mask.include_field.paths=person.photo&request_mask.include_field.paths=person.cover_photo&request_mask.include_field.paths=person.email&request_mask.include_field.paths=person.organization&request_mask.include_field.paths=person.location&request_mask.include_field.paths=person.email&requestMask.includeField.paths=person.phone&core_id_params.enable_private_names=true&requestMask.includeField.paths=person.read_only_profile_info&key={internal_token}"
req = httpx_client.get(url, headers=headers)
data = json.loads(req.text)
# pprint(data)
if "error" in data and "Request had invalid authentication credentials" in data["error"]["message"]:
exit("[-] Cookies/Tokens seems expired, please verify them.")
elif "error" in data:
print("[-] Error :")
pprint(data)
exit()
if data["personResponse"][0]["status"].lower() == "not_found":
return False
name = get_account_name(httpx_client, gaiaID, data, internal_auth, internal_token, config)
profile_data = data["personResponse"][0]["person"]
profile_pics = []
for p in profile_data["photo"]:
profile_pics.append(Picture(p["url"], p.get("isDefault", False)))
# mostly is default
cover_pics = []
for p in profile_data["coverPhoto"]:
cover_pics.append(Picture(p["imageUrl"], p["isDefault"]))
emails = update_emails({}, profile_data)
# absent if user didn't enter or hide them
phones = []
if "phone" in profile_data:
for p in profile_data["phone"]:
phones.append(f'{p["value"]} ({p["type"]})')
# absent if user didn't enter or hide them
locations = []
if "location" in profile_data:
for l in profile_data["location"]:
locations.append(l["value"] if not l.get("current") else f'{l["value"]} (current)')
# absent if user didn't enter or hide them
organizations = []
if "organization" in profile_data:
organizations = (f'{o["name"]} ({o["type"]})' for o in profile_data["organization"])
return {"name": name, "profile_pics": profile_pics, "cover_pics": cover_pics,
"organizations": ', '.join(organizations), "locations": ', '.join(locations),
"emails_set": emails, "phones": ', '.join(phones)}
def get_account_name(httpx_client, gaiaID, data, internal_auth, internal_token, config):
try:
name = data["personResponse"][0]["person"]["metadata"]["bestDisplayName"]["displayName"]
except KeyError:
pass # We fallback on the classic method
else:
return name
# Classic method, but requires the target to have at least 1 GMaps contribution
req = httpx_client.get(f"https://www.google.com/maps/contrib/{gaiaID}")
gmaps_source = req.text
match = re.search(r'<meta content="Contributions by (.*?)" itemprop="name">', gmaps_source)
if not match:
return None
return match[1]
def image_hash(img):
flathash = imagehash.average_hash(img)
return flathash
def detect_default_profile_pic(flathash):
if flathash - imagehash.hex_to_flathash("000018183c3c0000", 8) < 10 :
return True
return False
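# Illustrative note: subtracting two imagehash values yields the Hamming
# distance between the 64-bit hashes, so "< 10" tolerates up to 9 differing
# bits against the known hash of Google's default silhouette avatar, e.g.:
#   from PIL import Image  # hypothetical caller-side usage
#   detect_default_profile_pic(image_hash(Image.open("avatar.jpg")))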
def sanitize_location(location):
not_country = False
not_town = False
town = "?"
country = "?"
if "city" in location:
town = location["city"]
elif "village" in location:
town = location["village"]
elif "town" in location:
town = location["town"]
elif "municipality" in location:
town = location["municipality"]
else:
not_town = True
if not "country" in location:
not_country = True
location["country"] = country
if not_country and not_town:
return False
location["town"] = town
return location
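# Illustrative example: Nominatim payloads name the locality under different
# keys, which sanitize_location folds into a single "town" field:
#   sanitize_location({"village": "Giverny", "country": "France"})
#   # -> {"village": "Giverny", "country": "France", "town": "Giverny"}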
def get_driverpath():
driver_path = shutil.which("chromedriver") or shutil.which("chromium.chromedriver")
if driver_path:
return driver_path
if within_docker():
chromedrivermanager_silent = ChromeDriverManager(print_first_line=False, log_level=0, path="/usr/src/app")
else:
chromedrivermanager_silent = ChromeDriverManager(print_first_line=False, log_level=0)
driver = chromedrivermanager_silent.driver
driverpath = chromedrivermanager_silent.driver_cache.find_driver(driver)
if driverpath:
return driverpath
else:
print("[Webdrivers Manager] I'm updating the chromedriver...")
if within_docker():
driver_path = ChromeDriverManager(path="/usr/src/app").install()
else:
driver_path = ChromeDriverManager().install()
print("[Webdrivers Manager] The chromedriver has been updated !\n")
return driver_path
def get_chrome_options_args(is_headless):
chrome_options = Options()
chrome_options.add_argument('--log-level=3')
chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
chrome_options.add_argument("--no-sandbox")
if is_headless:
chrome_options.add_argument("--headless")
if (Os().wsl or Os().windows) and is_headless:
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-setuid-sandbox")
chrome_options.add_argument("--no-first-run")
chrome_options.add_argument("--no-zygote")
chrome_options.add_argument("--single-process")
chrome_options.add_argument("--disable-features=VizDisplayCompositor")
return chrome_options
def inject_osid(cookies, service, config):
with open(config.data_path, 'r') as f:
out = json.loads(f.read())
cookies["OSID"] = out["osids"][service]
return cookies
def gen_sapisidhash(sapisid: str, origin: str, timestamp: str = str(int(time()))) -> str:
return f"{timestamp}_{hashlib.sha1(' '.join([timestamp, sapisid, origin]).encode()).hexdigest()}"


@@ -1,202 +0,0 @@
import json
import urllib.parse
from io import BytesIO
from urllib.parse import unquote as parse_url
from PIL import Image
from lib.search import search as gdoc_search
from lib.utils import *
def get_channel_data(client, channel_url):
data = None
retries = 2
    for retry in reversed(range(retries)):
req = client.get(f"{channel_url}/about")
source = req.text
try:
data = json.loads(source.split('var ytInitialData = ')[1].split(';</script>')[0])
except (KeyError, IndexError):
if retry == 0:
return False
continue
else:
break
handle = data["metadata"]["channelMetadataRenderer"]["vanityChannelUrl"].split("/")[-1]
tabs = [x[list(x.keys())[0]] for x in data["contents"]["twoColumnBrowseResultsRenderer"]["tabs"]]
about_tab = [x for x in tabs if x["title"].lower() == "about"][0]
channel_details = about_tab["content"]["sectionListRenderer"]["contents"][0]["itemSectionRenderer"]["contents"][0]["channelAboutFullMetadataRenderer"]
out = {
"name": None,
"description": None,
"channel_urls": [],
"email_contact": False,
"views": None,
"joined_date": None,
"primary_links": [],
"country": None
}
out["name"] = data["metadata"]["channelMetadataRenderer"]["title"]
out["channel_urls"].append(data["metadata"]["channelMetadataRenderer"]["channelUrl"])
out["channel_urls"].append(f"https://www.youtube.com/c/{handle}")
out["channel_urls"].append(f"https://www.youtube.com/user/{handle}")
out["email_contact"] = "businessEmailLabel" in channel_details
out["description"] = channel_details["description"]["simpleText"] if "description" in channel_details else None
out["views"] = channel_details["viewCountText"]["simpleText"].split(" ")[0] if "viewCountText" in channel_details else None
out["joined_date"] = channel_details["joinedDateText"]["runs"][1]["text"] if "joinedDateText" in channel_details else None
out["country"] = channel_details["country"]["simpleText"] if "country" in channel_details else None
if "primaryLinks" in channel_details:
for primary_link in channel_details["primaryLinks"]:
title = primary_link["title"]["simpleText"]
url = parse_url(primary_link["navigationEndpoint"]["urlEndpoint"]["url"].split("&q=")[-1])
out["primary_links"].append({"title": title, "url": url})
return out
def youtube_channel_search(client, query):
try:
link = "https://www.youtube.com/results?search_query={}&sp=EgIQAg%253D%253D"
req = client.get(link.format(urllib.parse.quote(query)))
source = req.text
data = json.loads(
source.split('window["ytInitialData"] = ')[1].split('window["ytInitialPlayerResponse"]')[0].split(';\n')[0])
channels = \
data["contents"]["twoColumnSearchResultsRenderer"]["primaryContents"]["sectionListRenderer"]["contents"][0][
"itemSectionRenderer"]["contents"]
results = {"channels": [], "length": len(channels)}
for channel in channels:
if len(results["channels"]) >= 10:
break
title = channel["channelRenderer"]["title"]["simpleText"]
if not query.lower() in title.lower():
continue
avatar_link = channel["channelRenderer"]["thumbnail"]["thumbnails"][0]["url"].split('=')[0]
if avatar_link[:2] == "//":
avatar_link = "https:" + avatar_link
profile_url = "https://youtube.com" + channel["channelRenderer"]["navigationEndpoint"]["browseEndpoint"][
"canonicalBaseUrl"]
req = client.get(avatar_link)
img = Image.open(BytesIO(req.content))
hash = str(image_hash(img))
results["channels"].append({"profile_url": profile_url, "name": title, "hash": hash})
return results
except (KeyError, IndexError):
return False
def youtube_channel_search_gdocs(client, query, data_path, gdocs_public_doc):
search_query = f"site:youtube.com/channel \\\"{query}\\\""
search_results = gdoc_search(search_query, data_path, gdocs_public_doc)
channels = []
for result in search_results:
sanitized = "https://youtube.com/" + ('/'.join(result["link"].split('/')[3:5]).split("?")[0])
if sanitized not in channels:
channels.append(sanitized)
if not channels:
return False
results = {"channels": [], "length": len(channels)}
channels = channels[:5]
for profile_url in channels:
data = None
avatar_link = None
retries = 2
        for retry in reversed(range(retries)):
req = client.get(profile_url, follow_redirects=True)
source = req.text
try:
data = json.loads(source.split('var ytInitialData = ')[1].split(';</script>')[0])
avatar_link = data["metadata"]["channelMetadataRenderer"]["avatar"]["thumbnails"][0]["url"].split('=')[0]
            except (KeyError, IndexError):
if retry == 0:
return False
continue
else:
break
req = client.get(avatar_link)
img = Image.open(BytesIO(req.content))
hash = str(image_hash(img))
title = data["metadata"]["channelMetadataRenderer"]["title"]
results["channels"].append({"profile_url": profile_url, "name": title, "hash": hash})
return results
def get_channels(client, query, data_path, gdocs_public_doc):
from_youtube = youtube_channel_search(client, query)
from_gdocs = youtube_channel_search_gdocs(client, query, data_path, gdocs_public_doc)
to_process = []
if from_youtube:
from_youtube["origin"] = "youtube"
to_process.append(from_youtube)
if from_gdocs:
from_gdocs["origin"] = "gdocs"
to_process.append(from_gdocs)
if not to_process:
return False
return to_process
def get_confidence(data, query, hash):
score_steps = 4
for source_nb, source in enumerate(data):
for channel_nb, channel in enumerate(source["channels"]):
score = 0
if hash == imagehash.hex_to_flathash(channel["hash"], 8):
score += score_steps * 4
if query == channel["name"]:
score += score_steps * 3
if query in channel["name"]:
score += score_steps * 2
if ((source["origin"] == "youtube" and source["length"] <= 5) or
(source["origin"] == "google" and source["length"] <= 4)):
score += score_steps
data[source_nb]["channels"][channel_nb]["score"] = score
channels = []
for source in data:
for channel in source["channels"]:
found_better = False
for source2 in data:
for channel2 in source2["channels"]:
if channel["profile_url"] == channel2["profile_url"]:
if channel2["score"] > channel["score"]:
found_better = True
break
if found_better:
break
if found_better:
continue
else:
channels.append(channel)
channels = sorted([json.loads(chan) for chan in set([json.dumps(channel) for channel in channels])],
key=lambda k: k['score'], reverse=True)
panels = sorted(set([c["score"] for c in channels]), reverse=True)
if not channels or (panels and panels[0] <= 0):
return 0, []
maxscore = sum([p * score_steps for p in range(1, score_steps + 1)])
for panel in panels:
chans = [c for c in channels if c["score"] == panel]
if len(chans) > 1:
panel -= 5
return (panel / maxscore * 100), chans
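# Worked example (illustrative), with score_steps = 4: a channel whose avatar
# hash matches (+16), whose name both equals (+12) and contains (+8) the query,
# and which comes from a short result list (+4) scores 40; since
# maxscore = 4 * (1 + 2 + 3 + 4) = 40, that is reported as 100% confidence.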
def extract_usernames(channels):
return [chan['profile_url'].split("/user/")[1] for chan in channels if "/user/" in chan['profile_url']]

main.py Normal file

@@ -0,0 +1,2 @@
if __name__ == "__main__":
from ghunt import ghunt; ghunt.main()


@@ -1,133 +0,0 @@
#!/usr/bin/env python3
import json
import sys
import os
from datetime import datetime
from io import BytesIO
from os.path import isfile
from pathlib import Path
from pprint import pprint
import httpx
from PIL import Image
import config
from lib.utils import *
from lib.banner import banner
def doc_hunt(doc_link):
banner()
tmprinter = TMPrinter()
if not doc_link:
exit("Please give the link to a Google resource.\nExample : https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms")
is_within_docker = within_docker()
if is_within_docker:
print("[+] Docker detected, profile pictures will not be saved.")
doc_id = ''.join([x for x in doc_link.split("?")[0].split("/") if len(x) in (33, 44)])
if doc_id:
print(f"\nDocument ID : {doc_id}\n")
else:
exit("\nDocument ID not found.\nPlease make sure you have something that looks like this in your link :\1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms")
if not isfile(config.data_path):
exit("Please generate cookies and tokens first, with the check_and_gen.py script.")
internal_token = ""
cookies = {}
with open(config.data_path, 'r') as f:
out = json.loads(f.read())
internal_token = out["keys"]["internal"]
cookies = out["cookies"]
headers = {**config.headers, **{"X-Origin": "https://drive.google.com"}}
client = httpx.Client(cookies=cookies, headers=headers)
url = f"https://clients6.google.com/drive/v2beta/files/{doc_id}?fields=alternateLink%2CcopyRequiresWriterPermission%2CcreatedDate%2Cdescription%2CdriveId%2CfileSize%2CiconLink%2Cid%2Clabels(starred%2C%20trashed)%2ClastViewedByMeDate%2CmodifiedDate%2Cshared%2CteamDriveId%2CuserPermission(id%2Cname%2CemailAddress%2Cdomain%2Crole%2CadditionalRoles%2CphotoLink%2Ctype%2CwithLink)%2Cpermissions(id%2Cname%2CemailAddress%2Cdomain%2Crole%2CadditionalRoles%2CphotoLink%2Ctype%2CwithLink)%2Cparents(id)%2Ccapabilities(canMoveItemWithinDrive%2CcanMoveItemOutOfDrive%2CcanMoveItemOutOfTeamDrive%2CcanAddChildren%2CcanEdit%2CcanDownload%2CcanComment%2CcanMoveChildrenWithinDrive%2CcanRename%2CcanRemoveChildren%2CcanMoveItemIntoTeamDrive)%2Ckind&supportsTeamDrives=true&enforceSingleParent=true&key={internal_token}"
retries = 100
for retry in range(retries):
req = client.get(url)
if "File not found" in req.text:
exit("[-] This file does not exist or is not public")
elif "rateLimitExceeded" in req.text:
tmprinter.out(f"[-] Rate-limit detected, retrying... {retry+1}/{retries}")
continue
else:
break
else:
tmprinter.clear()
exit("[-] Rate-limit exceeded. Try again later.")
if '"reason": "keyInvalid"' in req.text:
exit("[-] Your key is invalid, try regenerating your cookies & keys.")
tmprinter.clear()
data = json.loads(req.text)
# Extracting informations
# Dates
created_date = datetime.strptime(data["createdDate"], '%Y-%m-%dT%H:%M:%S.%fz')
modified_date = datetime.strptime(data["modifiedDate"], '%Y-%m-%dT%H:%M:%S.%fz')
print(f"[+] Creation date : {created_date.strftime('%Y/%m/%d %H:%M:%S')} (UTC)")
print(f"[+] Last edit date : {modified_date.strftime('%Y/%m/%d %H:%M:%S')} (UTC)")
# Permissions
user_permissions = []
if data["userPermission"]:
if data["userPermission"]["id"] == "me":
user_permissions.append(data["userPermission"]["role"])
if "additionalRoles" in data["userPermission"]:
user_permissions += data["userPermission"]["additionalRoles"]
public_permissions = []
owner = None
for permission in data["permissions"]:
if permission["id"] in ["anyoneWithLink", "anyone"]:
public_permissions.append(permission["role"])
if "additionalRoles" in data["permissions"]:
public_permissions += permission["additionalRoles"]
elif permission["role"] == "owner":
owner = permission
print("\nPublic permissions :")
for permission in public_permissions:
print(f"- {permission}")
if public_permissions != user_permissions:
print("[+] You have special permissions :")
for permission in user_permissions:
print(f"- {permission}")
if owner:
print("\n[+] Owner found !\n")
print(f"Name : {owner['name']}")
print(f"Email : {owner['emailAddress']}")
print(f"Google ID : {owner['id']}")
# profile picture
profile_pic_link = owner['photoLink']
req = client.get(profile_pic_link)
profile_pic_img = Image.open(BytesIO(req.content))
profile_pic_flathash = image_hash(profile_pic_img)
is_default_profile_pic = detect_default_profile_pic(profile_pic_flathash)
if not is_default_profile_pic and not is_within_docker:
print("\n[+] Custom profile picture !")
print(f"=> {profile_pic_link}")
if config.write_profile_pic and not is_within_docker:
open(Path(config.profile_pics_dir) / f'{owner["emailAddress"]}.jpg', 'wb').write(req.content)
print("Profile picture saved !\n")
else:
print("\n[-] Default profile picture\n")


@@ -1,228 +0,0 @@
#!/usr/bin/env python3
import json
import sys
import os
from datetime import datetime
from io import BytesIO
from os.path import isfile
from pathlib import Path
from pprint import pprint
import httpx
from PIL import Image
from geopy.geocoders import Nominatim
import config
from lib.banner import banner
import lib.gmaps as gmaps
import lib.youtube as ytb
from lib.photos import gpics
from lib.utils import *
import lib.calendar as gcalendar
def email_hunt(email):
banner()
if not email:
exit("Please give a valid email.\nExample : larry@google.com")
if not isfile(config.data_path):
exit("Please generate cookies and tokens first, with the check_and_gen.py script.")
chat_auth = ""
chat_key = ""
internal_auth = ""
internal_token = ""
cookies = {}
with open(config.data_path, 'r') as f:
out = json.loads(f.read())
chat_auth = out["chat_auth"]
chat_key = out["keys"]["chat"]
internal_auth = out["internal_auth"]
internal_token = out["keys"]["internal"]
cookies = out["cookies"]
client = httpx.Client(cookies=cookies, headers=config.headers)
data = is_email_google_account(client, chat_auth, cookies, email,
chat_key)
is_within_docker = within_docker()
if is_within_docker:
print("[+] Docker detected, profile pictures will not be saved.")
geolocator = Nominatim(user_agent="nominatim")
print(f"[+] {len(data['matches'])} account found !")
for user in data["matches"]:
print("\n------------------------------\n")
gaiaID = user["personId"][0]
email = user["lookupId"]
infos = data["people"][gaiaID]
# get name & profile picture
account = get_account_data(client, gaiaID, internal_auth, internal_token, config)
name = account["name"]
if name:
print(f"Name : {name}")
else:
if "name" not in infos:
print("[-] Couldn't find name")
else:
for i in range(len(infos["name"])):
if 'displayName' in infos['name'][i].keys():
name = infos["name"][i]["displayName"]
print(f"Name : {name}")
organizations = account["organizations"]
if organizations:
print(f"Organizations : {organizations}")
locations = account["locations"]
if locations:
print(f"Locations : {locations}")
# profile picture
        profile_pic_url = account.get("profile_pics") and account["profile_pics"][0].url
        profile_pic_flathash = None  # default when no profile picture is exposed
        if profile_pic_url:
req = client.get(profile_pic_url)
# TODO: make sure it's necessary now
profile_pic_img = Image.open(BytesIO(req.content))
profile_pic_flathash = image_hash(profile_pic_img)
is_default_profile_pic = detect_default_profile_pic(profile_pic_flathash)
if not is_default_profile_pic:
print("\n[+] Custom profile picture !")
print(f"=> {profile_pic_url}")
if config.write_profile_pic and not is_within_docker:
open(Path(config.profile_pics_dir) / f'{email}.jpg', 'wb').write(req.content)
print("Profile picture saved !")
else:
print("\n[-] Default profile picture")
# cover profile picture
cover_pic = account.get("cover_pics") and account["cover_pics"][0]
if cover_pic and not cover_pic.is_default:
cover_pic_url = cover_pic.url
req = client.get(cover_pic_url)
print("\n[+] Custom profile cover picture !")
print(f"=> {cover_pic_url}")
if config.write_profile_pic and not is_within_docker:
open(Path(config.profile_pics_dir) / f'cover_{email}.jpg', 'wb').write(req.content)
print("Cover profile picture saved !")
# last edit
try:
timestamp = int(infos["metadata"]["lastUpdateTimeMicros"][:-3])
last_edit = datetime.utcfromtimestamp(timestamp).strftime("%Y/%m/%d %H:%M:%S (UTC)")
print(f"\nLast profile edit : {last_edit}")
except KeyError:
last_edit = None
print(f"\nLast profile edit : Not found")
canonical_email = ""
emails = update_emails(account["emails_set"], infos)
if emails and len(list(emails)) == 1:
if list(emails.values())[0].is_normalized(email):
new_email = list(emails.keys())[0]
if email != new_email:
canonical_email = f' (canonical email is {new_email})'
emails = []
print(f"\nEmail : {email}{canonical_email}\nGaia ID : {gaiaID}\n")
if emails:
print(f"Contact emails : {', '.join(map(str, emails.values()))}")
phones = account["phones"]
if phones:
print(f"Contact phones : {phones}")
# is bot?
if "extendedData" in infos:
isBot = infos["extendedData"]["hangoutsExtendedData"]["isBot"]
if isBot:
print("Hangouts Bot : Yes !")
else:
print("Hangouts Bot : No")
else:
print("Hangouts Bot : Unknown")
# decide to check YouTube
ytb_hunt = False
try:
services = [x["appType"].lower() if x["appType"].lower() != "babel" else "hangouts" for x in
infos["inAppReachability"]]
if name and (config.ytb_hunt_always or "youtube" in services):
ytb_hunt = True
print("\n[+] Activated Google services :")
print('\n'.join(["- " + x.capitalize() for x in services]))
except KeyError:
ytb_hunt = True
print("\n[-] Unable to fetch connected Google services.")
# check YouTube
if name and ytb_hunt:
confidence = None
data = ytb.get_channels(client, name, config.data_path,
config.gdocs_public_doc)
if not data:
print("\n[-] YouTube channel not found.")
else:
confidence, channels = ytb.get_confidence(data, name, profile_pic_flathash)
if confidence:
print(f"\n[+] YouTube channel (confidence => {confidence}%) :")
for channel in channels:
print(f"- [{channel['name']}] {channel['profile_url']}")
possible_usernames = ytb.extract_usernames(channels)
if possible_usernames:
print("\n[+] Possible usernames found :")
for username in possible_usernames:
print(f"- {username}")
else:
print("\n[-] YouTube channel not found.")
# TODO: return gpics function output here
#gpics(gaiaID, client, cookies, config.headers, config.regexs["albums"], config.regexs["photos"],
# config.headless)
# reviews
reviews = gmaps.scrape(gaiaID, client, cookies, config, config.headers, config.regexs["review_loc_by_id"], config.headless)
if reviews:
confidence, locations = gmaps.get_confidence(geolocator, reviews, config.gmaps_radius)
print(f"\n[+] Probable location (confidence => {confidence}) :")
loc_names = []
for loc in locations:
loc_names.append(
f"- {loc['avg']['town']}, {loc['avg']['country']}"
)
loc_names = set(loc_names) # delete duplicates
for loc in loc_names:
print(loc)
# Google Calendar
calendar_response = gcalendar.fetch(email, client, config)
if calendar_response:
print("[+] Public Google Calendar found !")
events = calendar_response["events"]
if events:
gcalendar.out(events)
else:
print("=> No recent events found.")
else:
print("[-] No public Google Calendar.")


@@ -1,146 +0,0 @@
#!/usr/bin/env python3
import json
import sys
import os
from datetime import datetime
from io import BytesIO
from os.path import isfile
from pathlib import Path
from pprint import pprint
import httpx
from PIL import Image
from geopy.geocoders import Nominatim
import config
from lib.banner import banner
import lib.gmaps as gmaps
import lib.youtube as ytb
from lib.utils import *
def gaia_hunt(gaiaID):
banner()
if not gaiaID:
exit("Please give a valid GaiaID.\nExample : 113127526941309521065")
if not isfile(config.data_path):
exit("Please generate cookies and tokens first, with the check_and_gen.py script.")
internal_auth = ""
internal_token = ""
cookies = {}
with open(config.data_path, 'r') as f:
out = json.loads(f.read())
internal_auth = out["internal_auth"]
internal_token = out["keys"]["internal"]
cookies = out["cookies"]
client = httpx.Client(cookies=cookies, headers=config.headers)
account = get_account_data(client, gaiaID, internal_auth, internal_token, config)
if not account:
exit("[-] No account linked to this Gaia ID.")
is_within_docker = within_docker()
if is_within_docker:
print("[+] Docker detected, profile pictures will not be saved.")
geolocator = Nominatim(user_agent="nominatim")
# get name & other info
name = account["name"]
if name:
print(f"Name : {name}")
organizations = account["organizations"]
if organizations:
print(f"Organizations : {organizations}")
locations = account["locations"]
if locations:
print(f"Locations : {locations}")
# get profile picture
    profile_pic_url = account.get("profile_pics") and account["profile_pics"][0].url
    profile_pic_flathash = None  # default when no profile picture is exposed
    if profile_pic_url:
req = client.get(profile_pic_url)
# TODO: make sure it's necessary now
profile_pic_img = Image.open(BytesIO(req.content))
profile_pic_flathash = image_hash(profile_pic_img)
is_default_profile_pic = detect_default_profile_pic(profile_pic_flathash)
if not is_default_profile_pic:
print("\n[+] Custom profile picture !")
print(f"=> {profile_pic_url}")
if config.write_profile_pic and not is_within_docker:
open(Path(config.profile_pics_dir) / f'{gaiaID}.jpg', 'wb').write(req.content)
print("Profile picture saved !")
else:
print("\n[-] Default profile picture")
# cover profile picture
cover_pic = account.get("cover_pics") and account["cover_pics"][0]
    if cover_pic and not cover_pic.is_default:
        cover_pic_url = cover_pic.url
        req = client.get(cover_pic_url)
        print("\n[+] Custom profile cover picture !")
        print(f"=> {cover_pic_url}")
        if config.write_profile_pic and not is_within_docker:
            open(Path(config.profile_pics_dir) / f'cover_{gaiaID}.jpg', 'wb').write(req.content)
print("Cover profile picture saved !")
print(f"\nGaia ID : {gaiaID}")
emails = account["emails_set"]
if emails:
print(f"Contact emails : {', '.join(map(str, emails.values()))}")
phones = account["phones"]
if phones:
print(f"Contact phones : {phones}")
# check YouTube
if name:
confidence = None
data = ytb.get_channels(client, name, config.data_path,
config.gdocs_public_doc)
if not data:
print("\n[-] YouTube channel not found.")
else:
confidence, channels = ytb.get_confidence(data, name, profile_pic_flathash)
if confidence:
print(f"\n[+] YouTube channel (confidence => {confidence}%) :")
for channel in channels:
print(f"- [{channel['name']}] {channel['profile_url']}")
possible_usernames = ytb.extract_usernames(channels)
if possible_usernames:
print("\n[+] Possible usernames found :")
for username in possible_usernames:
print(f"- {username}")
else:
print("\n[-] YouTube channel not found.")
# reviews
reviews = gmaps.scrape(gaiaID, client, cookies, config, config.headers, config.regexs["review_loc_by_id"], config.headless)
if reviews:
confidence, locations = gmaps.get_confidence(geolocator, reviews, config.gmaps_radius)
print(f"\n[+] Probable location (confidence => {confidence}) :")
loc_names = []
for loc in locations:
loc_names.append(
f"- {loc['avg']['town']}, {loc['avg']['country']}"
)
loc_names = set(loc_names) # delete duplicates
for loc in loc_names:
print(loc)


@@ -1,250 +0,0 @@
#!/usr/bin/env python3
import json
import sys
from datetime import datetime
from datetime import date
from io import BytesIO
from os.path import isfile
from pathlib import Path
from pprint import pprint
import httpx
import wayback
from PIL import Image
from bs4 import BeautifulSoup as bs
from geopy.geocoders import Nominatim
import config
from lib.banner import banner
import lib.gmaps as gmaps
import lib.youtube as ytb
from lib.utils import *
def find_gaiaID(body):
"""
    We don't parse with a regex, to avoid extracting another gaiaID
    (for example, if the target had put a secondary Google Plus blog in their channel's social links).
"""
# 1st method ~ 2014
try:
publisher = body.find("link", {"rel": "publisher"})
gaiaID = publisher.attrs["href"].split("/")[-1]
    except Exception:
        pass  # fall through to the next extraction method
else:
if gaiaID:
return gaiaID
# 2nd method ~ 2015
try:
author_links = [x.find_next("link") for x in body.find_all("span", {"itemprop": "author"})]
valid_author_link = [x for x in author_links if "plus.google.com/" in x.attrs["href"]][0]
gaiaID = valid_author_link.attrs["href"].split("/")[-1]
    except Exception:
        pass  # fall through to the next extraction method
else:
if gaiaID:
return gaiaID
# 3rd method ~ 2019
try:
data = json.loads(str(body).split('window["ytInitialData"] = ')[1].split('window["ytInitialPlayerResponse"]')[0].strip().strip(";"))
gaiaID = data["metadata"]["channelMetadataRenderer"]["plusPageLink"].split("/")[-1]
    except Exception:
        pass  # no more extraction methods to try
else:
if gaiaID:
return gaiaID
def analyze_snapshots(client, wb_client, channel_url, dates):
body = None
record = None
for record in wb_client.search(channel_url, to_date=dates["to"], from_date=dates["from"]):
try:
req = client.get(record.raw_url)
if req.status_code == 429:
                continue  # the rate-limit is erratic and snapshot-based, so just try the next snapshot
        except Exception:
            pass  # fetch failed, try the next snapshot
else:
if re.compile(config.regexs["gplus"]).findall(req.text):
body = bs(req.text, 'html.parser')
#print(record)
print(f'[+] Snapshot : {record.timestamp.strftime("%d/%m/%Y")}')
break
else:
return None
gaiaID = find_gaiaID(body)
return gaiaID
def check_channel(client, wb_client, channel_url):
# Fast check (no doubt that GaiaID is present in this period)
dates = {"to": date(2019, 12, 31), "from": date(2014, 1, 1)}
gaiaID = analyze_snapshots(client, wb_client, channel_url, dates)
# Complete check
if not gaiaID:
dates = {"to": date(2020, 7, 31), "from": date(2013, 6, 3)}
gaiaID = analyze_snapshots(client, wb_client, channel_url, dates)
return gaiaID
def launch_checks(client, wb_client, channel_data):
for channel_url in channel_data["channel_urls"]:
gaiaID = check_channel(client, wb_client, channel_url)
if gaiaID:
return gaiaID
return False
def youtube_hunt(channel_url):
banner()
if not channel_url:
exit("Please give a valid channel URL.\nExample : https://www.youtube.com/user/PewDiePie")
if not isfile(config.data_path):
exit("Please generate cookies and tokens first, with the check_and_gen.py script.")
internal_auth = ""
internal_token = ""
cookies = {}
with open(config.data_path, 'r') as f:
out = json.loads(f.read())
internal_auth = out["internal_auth"]
internal_token = out["keys"]["internal"]
cookies = out["cookies"]
if not "PREF" in cookies:
pref_cookies = {"PREF": "tz=Europe.Paris&f6=40000000&hl=en"} # To set the lang in english
cookies = {**cookies, **pref_cookies}
client = httpx.Client(cookies=cookies, headers=config.headers)
is_within_docker = within_docker()
if is_within_docker:
print("[+] Docker detected, profile pictures will not be saved.")
geolocator = Nominatim(user_agent="nominatim")
print("\n📌 [Youtube channel]")
channel_data = ytb.get_channel_data(client, channel_url)
if channel_data:
is_channel_existing = True
print(f'[+] Channel name : {channel_data["name"]}\n')
else:
is_channel_existing = False
print("[-] Channel not found.\nSearching for a trace in the archives...\n")
channel_data = {
"name": None,
"description": None,
"channel_urls": [channel_url],
"email_contact": False,
"views": None,
"joined_date": None,
"primary_links": [],
"country": None
}
wb_client = wayback.WaybackClient()
gaiaID = launch_checks(client, wb_client, channel_data)
if gaiaID:
print(f"[+] GaiaID => {gaiaID}\n")
else:
print("[-] No interesting snapshot found.\n")
if is_channel_existing:
if channel_data["email_contact"]:
print(f'[+] Email on profile : available !')
else:
print(f'[-] Email on profile : not available.')
if channel_data["country"]:
print(f'[+] Country : {channel_data["country"]}')
print()
if channel_data["description"]:
print(f'🧬 Description : {channel_data["description"]}')
if channel_data["views"]:
print(f'🧬 Total views : {channel_data["views"]}')
if channel_data["joined_date"]:
print(f'🧬 Joined date : {channel_data["joined_date"]}')
if channel_data["primary_links"]:
print(f'\n[+] Primary links ({len(channel_data["primary_links"])} found)')
for primary_link in channel_data["primary_links"]:
print(f'- {primary_link["title"]} => {primary_link["url"]}')
if not gaiaID:
exit()
print("\n📌 [Google account]")
# get name & profile picture
account = get_account_data(client, gaiaID, internal_auth, internal_token, config)
name = account["name"]
if name:
print(f"Name : {name}")
# profile picture
    profile_pic_url = account.get("profile_pics") and account["profile_pics"][0].url
    if profile_pic_url:  # only fetch when a profile picture actually exists
req = client.get(profile_pic_url)
# TODO: make sure it's necessary now
profile_pic_img = Image.open(BytesIO(req.content))
profile_pic_flathash = image_hash(profile_pic_img)
is_default_profile_pic = detect_default_profile_pic(profile_pic_flathash)
if not is_default_profile_pic:
print("\n[+] Custom profile picture !")
print(f"=> {profile_pic_url}")
if config.write_profile_pic and not is_within_docker:
open(Path(config.profile_pics_dir) / f'{gaiaID}.jpg', 'wb').write(req.content)
print("Profile picture saved !")
else:
print("\n[-] Default profile picture")
# cover profile picture
cover_pic = account.get("cover_pics") and account["cover_pics"][0]
if cover_pic and not cover_pic.is_default:
cover_pic_url = cover_pic.url
req = client.get(cover_pic_url)
print("\n[+] Custom profile cover picture !")
print(f"=> {cover_pic_url}")
if config.write_profile_pic and not is_within_docker:
open(Path(config.profile_pics_dir) / f'cover_{gaiaID}.jpg', 'wb').write(req.content)
print("Cover profile picture saved !")
# reviews
reviews = gmaps.scrape(gaiaID, client, cookies, config, config.headers, config.regexs["review_loc_by_id"], config.headless)
if reviews:
confidence, locations = gmaps.get_confidence(geolocator, reviews, config.gmaps_radius)
print(f"\n[+] Probable location (confidence => {confidence}) :")
loc_names = []
for loc in locations:
loc_names.append(
f"- {loc['avg']['town']}, {loc['avg']['country']}"
)
loc_names = set(loc_names) # delete duplicates
for loc in loc_names:
print(loc)


@@ -1,15 +1,15 @@
-geopy
-httpx>=0.20.0
-selenium-wire>=4.5.5
-selenium>=4.0.0
-imagehash
-pillow
-python-dateutil
-colorama
-beautifultable
-termcolor
-webdriver-manager==3.5.4
-wayback
-bs4
-packaging
-aiohttp==3.8.3 # Avoid this weird error : AttributeError: module 'aiohttp._http_writer' has no attribute '_serialize_headers'
+geopy==2.3.0
+httpx[http2]==0.23.1
+imagehash==4.3.1
+pillow==9.3.0
+python-dateutil==2.8.2
+rich==12.6.0
+beautifultable==1.1.0
+beautifulsoup4==4.11.1
+alive-progress==2.4.1
+protobuf==4.21.9
+trio==0.21.0
+autoslot==2021.10.1
+humanize==4.4.0
+inflection==0.5.1
+jsonpickle==2.2.0

setup.py Normal file

@@ -0,0 +1,28 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open("requirements.txt", "r") as f:
    requirements = [x.strip() for x in f.readlines()]
requirements = [f"{line.split('#egg=')[-1]} @ {line}" if "#egg=" in line else line for line in requirements]
setup(
name='ghunt',
version='2.0.1',
packages=find_packages(include=['ghunt', 'ghunt.*']),
license='AGPL-3.0',
    license_files=('LICENSE.md',),  # single-element tuple; ('LICENSE.md') would be a plain string
author='mxrch',
author_email='mxrch.dev@pm.me',
description='An offensive Google framework.',
long_description='GHunt is an offensive Google framework, designed for pentest and OSINT.',
long_description_content_type='text/x-rst',
url='https://github.com/mxrch/ghunt',
keywords=["osint", "pentest", "cybersecurity", "investigation", "hideandsec", "malfrats"],
entry_points={
'console_scripts': [
'ghunt = ghunt.ghunt:main'
]
},
install_requires=requirements
)
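# Usage sketch (assumption): after installing the package ("pip install .",
# or "pip install -e ." for development), the console_scripts entry point
# above exposes a "ghunt" command mapped to ghunt.ghunt:main().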