1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
"""Java plugin for THAT."""
from collections import OrderedDict

VERSION = "1.1.0"
"""Version of this set of plugin definitions."""
MINIMUM_THAT_VERSION = "1.1.0"
"""Minimum THAT version required to run these plugin definitions."""
# Plugin identifier used by THAT to refer to this set of definitions.
NAME = "java"
# Relative priority among plugins -- ordering semantics defined by THAT core.
PRIORITY = 2
# Tanium questions to ask, keyed by the CSV file each result set is stored in
# (populated below).
GET_TANIUM_DATA = OrderedDict()
# Analysis snippets (source strings executed by THAT), keyed by result name
# (populated below).
ANALYZE_DATA = OrderedDict()
# Names of the internet-data collector functions defined later in this module;
# their outputs land in "internet_data.csv" under these column names.
GET_INTERNET_DATA = [
    "jre_vul_count",
    "latest_jre_vul_version",
]
# Tanium question definitions, keyed by the CSV file the results land in:
#   "java.csv"        <- 'Get Installed Applications containing "java" from all machines'
#   "jre.csv"         <- 'Get Installed Applications that start with "java" from all machines'
#   "javatargets.csv" <- 'Get Online from all machines where Installed Applications contains "java"'
GET_TANIUM_DATA.update([
    ("java.csv", {
        "filters": [],
        "sensors": ["Installed Applications, that contains:java"],
    }),
    ("jre.csv", {
        "filters": [],
        "sensors": ["Installed Applications, that starts with:java"],
    }),
    ("javatargets.csv", {
        "filters": ["Installed Applications, that contains:java"],
        "sensors": ["Online"],
    }),
])
ANALYZE_DATA["cleaned_java_df"] = """
# clean out noise from java.csv
csv = "java.csv"
df = self.load_csv_as_df(csv)
result = self.clean_df(df, columns=["Name"], add_values=["Scan Error", "Java not installed"])
"""
ANALYZE_DATA["cleaned_jre_df"] = """
# clean out noise from jre.csv
csv = "jre.csv"
df = self.load_csv_as_df(csv)
result = self.clean_df(df, columns=["Name"])
"""
ANALYZE_DATA["java_install_total"] = """
# total number endpoints reporting any java product installed
csv = "javatargets.csv"
df = self.load_csv_as_df(csv)
result = df['Count'].sum()
"""
ANALYZE_DATA["product_count"] = """
# get total count of java products installed across endpoints
df = self.get_result("cleaned_java_df")
result = len(df['Name'].unique())
"""
ANALYZE_DATA["install_count"] = """
# get total count of java products installed across endpoints
df = self.get_result("cleaned_java_df")
result = df['Count'].sum()
"""
ANALYZE_DATA["install_avg"] = """
# average install per endpoint given number of machines reporting java product installed
java_install_total = self.get_result("java_install_total")
install_count = self.get_result("install_count")
if java_install_total > 0:
if (float(install_count) / float(java_install_total)) < 1:
result = math.ceil(float(install_count) / float(java_install_total))
else:
result = float(install_count) / float(java_install_total)
else:
result = 0
result = int(result)
"""
ANALYZE_DATA["jre_product_count"] = """
# get total count of JRE installed across endpoints
df = self.get_result("cleaned_jre_df")
result = len(df['Version'].unique())
"""
ANALYZE_DATA["jre_install_count"] = """
# get total count of JRE products installed across endpoints
df = self.get_result("cleaned_jre_df")
result = df['Count'].sum()
"""
ANALYZE_DATA["jre_vul_count"] = """
# Total known JRE Vulnerabilities from CVEDetails.com
csv = "internet_data.csv"
df = self.load_csv_as_df(csv)
result = df.iloc[0]["jre_vul_count"]
"""
ANALYZE_DATA["latest_jre_vul_version"] = """
# Latest known JRE Vulnerable Version from CVE Data
csv = "internet_data.csv"
df = self.load_csv_as_df(csv)
result = df.iloc[0]["latest_jre_vul_version"]
"""
ANALYZE_DATA["jre_vul_endpoints"] = """
# Approximate Total Number of Vulnerable Installs on Endpoints (Compare latest version Across Endpoints to CVE Data)
df = self.get_result("cleaned_jre_df")
latest_ver = self.get_result("latest_jre_vul_version")
result = df.loc[df['Version'] <= latest_ver, 'Count'].sum()
"""
ANALYZE_DATA["jre_percent_vul_endpoints"] = """
# Approximate Percentage of Vulnerable Endpoints (Compare latest version Across Endpoints to CVE Data)
df = self.get_result("cleaned_jre_df")
vul_endpoints = self.get_result("jre_vul_endpoints")
install_count = self.get_result("jre_install_count")
result = int(math.ceil((int(vul_endpoints) * 100) / int(install_count)))
"""
ANALYZE_DATA["jre_non_vul_endpoints"] = """
# Approximate Total Number of Non-Vulnerable Installs on Endpoints ( Total JRE Install Count - Total Vul Endpoints )
jre_vul_ep = self.get_result("jre_vul_endpoints")
jre_total_install = self.get_result("jre_install_count")
result = int(int(jre_total_install) - int(jre_vul_ep))
"""
def jre_vul_count(wequests, pkgs, **kwargs):
    """Get java vulnerability count from cvedetails.com."""
    # Fetch the CVEDetails product page for Oracle JRE.
    response = wequests.request(url="http://www.cvedetails.com/product/19117/Oracle-JRE.html")
    # Parse the HTML so we can walk the vulnerability-summary table.
    page = pkgs.BeautifulSoup(response.content, "lxml")
    # The cell labelled 'Total' sits beside the cell holding the grand total.
    label_cell = page.body.find(text='Total').parent
    total_cell = label_cell.find_next_sibling('td')
    # Strip the layout whitespace CVEDetails embeds in the cell text.
    return total_cell.string.replace("\t", "").replace("\n", "")
def latest_jre_vul_version(wequests, pkgs, **kwargs):
    """Get latest java vulnerable version from cvedetails.com.

    Scrapes the CVEDetails version list for Oracle JRE, rebuilds each row's
    version string, and returns the highest version (natural sort order).
    `wequests` is the HTTP wrapper and `pkgs` a namespace providing
    BeautifulSoup, re, and natsorted -- both supplied by THAT.
    """
    # content url
    url = "http://www.cvedetails.com/version-list/93/19117/1/Oracle-JRE.html"
    r = wequests.request(url=url)
    # soupify it
    soup = pkgs.BeautifulSoup(r.content, "lxml")
    # find table with versions from website
    souptable = soup.find('table', attrs={'class': 'listtable'})
    # declare empty arrays to dump data from website
    data = []
    versions = []
    # for each row in table, place it inside the "data" array
    for row in souptable.findAll("tr"):
        cells = row.findAll("td")
        # only add it to the array if the row isn't empty (skips header rows)
        if len(cells) > 0:
            data.append(cells)
    # for each row of cells, extract version fragments into an array
    for ele in data:
        # First, strip the tab/newline padding CVEDetails wraps cell text in
        test = ele[0].string.replace("\t", "").replace("\n", "")
        # Only rows whose first cell starts with a digit are version rows
        # (others are application-name rows); "test" is not used after this.
        if test[0].isdigit():
            # fragments of this row's version number
            subversion = []
            # get the first three columns (major version + "Update X" fields)
            for td in ele[:3]:
                # cleanup html some more
                td = td.string.replace("\t", "").replace("\n", "")
                # make sure cell is not empty
                if len(td) > 0:
                    # keep cells that start with a digit, or the "Update X"
                    # field, which carries version information for JRE
                    if td[0].isdigit() is True or td[0].startswith("U"):
                        # regex-clean any string characters, leave numbers/dots only
                        td = pkgs.re.sub(r"[^.0-9]+", "",td)
                        # place numbers in temp array called subversion
                        subversion.append(td)
            # place sub versions into a versions array which we process below
            versions.append(subversion)
    # join major and "Update" fragments collected above into full version strings
    newversions = []
    for version in versions:
        # 3/14/17 - website omits the trailing "0" on versions like "1.8";
        # we expect all JRE versions to include the final zero, i.e. "1.8.0",
        # so pad short major versions with ".0"
        if len(version[0]) < 5:
            version[0] = version[0] + ".0"
        # join fragments, then drop the first two characters -- presumably the
        # leading "1." prefix (e.g. "1.8.0.121" -> "8.0.121"); confirm this
        # holds for all rows the site returns
        if len(version) > 1:
            version = '.'.join(version)
            version = version[2:]
        newversions.append(version)
    # natural-sort versions highest-first and return the highest for CVE processing
    ret = pkgs.natsorted(newversions, reverse=True)[0]
    return ret
|