20
20
import re
21
21
import subprocess
22
22
import sys
23
+ import xml .etree .ElementTree as ElementTree
24
+ from pathlib import Path
23
25
24
26
import click
25
27
@@ -36,6 +38,20 @@ def get_xml_path(filename, output_dir):
36
38
return os .path .abspath (os .path .join (output_dir , xml ))
37
39
38
40
41
def make_asciidoc(target: str, include_in_progress: bool, spec_dir: str, dry_run: bool) -> str:
    """Run ``make`` for *target* in *spec_dir* and return its captured stdout.

    ``PRINT_FILENAMES=1`` is always passed so the build emits the list of
    files it includes; ``INCLUDE_IN_PROGRESS=1`` is added only when
    *include_in_progress* is set. In *dry_run* mode the assembled command
    list is printed and an empty string is returned without running make.
    """
    command = ['make', 'PRINT_FILENAMES=1']
    if include_in_progress:
        command.append('INCLUDE_IN_PROGRESS=1')
    command.append(target)

    # Dry run: show what would be executed, produce no file listing.
    if dry_run:
        print(command)
        return ''

    listing = subprocess.check_output(command, cwd=spec_dir).decode('UTF-8').rstrip()
    print(listing)
    return listing
53
+
54
+
39
55
@click .command ()
40
56
@click .option (
41
57
'--scraper' ,
@@ -56,16 +72,21 @@ def get_xml_path(filename, output_dir):
56
72
default = False ,
57
73
is_flag = True ,
58
74
help = 'Flag for dry run' )
59
- def main (scraper , spec_root , output_dir , dry_run ):
75
+ @click .option (
76
+ '--include-in-progress' ,
77
+ default = True ,
78
+ type = bool ,
79
+ help = 'Include in-progress items from spec' )
80
+ def main (scraper , spec_root , output_dir , dry_run , include_in_progress ):
60
81
# Clusters need to be scraped first because the cluster directory is passed to the device type directory
61
- scrape_clusters (scraper , spec_root , output_dir , dry_run )
62
- scrape_device_types (scraper , spec_root , output_dir , dry_run )
82
+ scrape_clusters (scraper , spec_root , output_dir , dry_run , include_in_progress )
83
+ scrape_device_types (scraper , spec_root , output_dir , dry_run , include_in_progress )
63
84
if not dry_run :
64
85
dump_versions (scraper , spec_root , output_dir )
65
86
dump_cluster_ids (output_dir )
66
87
67
88
68
- def scrape_clusters (scraper , spec_root , output_dir , dry_run ):
89
+ def scrape_clusters (scraper , spec_root , output_dir , dry_run , include_in_progress ):
69
90
src_dir = os .path .abspath (os .path .join (spec_root , 'src' ))
70
91
sdm_clusters_dir = os .path .abspath (
71
92
os .path .join (src_dir , 'service_device_management' ))
@@ -74,42 +95,64 @@ def scrape_clusters(scraper, spec_root, output_dir, dry_run):
74
95
media_clusters_dir = os .path .abspath (
75
96
os .path .join (app_clusters_dir , 'media' ))
76
97
clusters_output_dir = os .path .abspath (os .path .join (output_dir , 'clusters' ))
77
- dm_clusters_list = ['ACL-Cluster.adoc' , 'Binding-Cluster.adoc' , 'bridge-clusters.adoc' ,
78
- 'Descriptor-Cluster.adoc' , 'Group-Key-Management-Cluster.adoc' , 'ICDManagement.adoc' ,
79
- 'Label-Cluster.adoc' ]
80
- sdm_exclude_list = ['AdminAssistedCommissioningFlows.adoc' , 'BulkDataExchange.adoc' , 'CommissioningFlows.adoc' ,
81
- 'DeviceCommissioningFlows.adoc' , 'DistributedComplianceLedger.adoc' , 'OTAFileFormat.adoc' ]
82
- app_exclude_list = ['appliances.adoc' , 'closures.adoc' , 'general.adoc' ,
83
- 'hvac.adoc' , 'lighting.adoc' , 'meas_and_sense.adoc' , 'robots.adoc' ]
84
- media_exclude_list = ['media.adoc' , 'VideoPlayerArchitecture.adoc' ]
85
98
86
99
if not os .path .exists (clusters_output_dir ):
87
100
os .makedirs (clusters_output_dir )
88
101
102
+ print ('Generating main spec to get file include list - this make take a few minutes' )
103
+ main_out = make_asciidoc ('pdf' , include_in_progress , spec_root , dry_run )
104
+ print ('Generating cluster spec to get file include list - this make take a few minutes' )
105
+ cluster_out = make_asciidoc ('pdf-appclusters-book' , include_in_progress , spec_root , dry_run )
106
+
89
107
def scrape_cluster (filename : str ) -> None :
108
+ base = Path (filename ).stem
109
+ if base not in main_out and base not in cluster_out :
110
+ print (f'skipping file: { base } as it is not compiled into the asciidoc' )
111
+ return
90
112
xml_path = get_xml_path (filename , clusters_output_dir )
91
113
cmd = [scraper , 'cluster' , '-i' , filename , '-o' ,
92
- xml_path , '-nd' , '--define' , 'in-progress' ]
114
+ xml_path , '-nd' ]
115
+ if include_in_progress :
116
+ cmd .extend (['--define' , 'in-progress' ])
93
117
if dry_run :
94
118
print (cmd )
95
119
else :
96
120
subprocess .run (cmd )
97
121
98
122
def scrape_all_clusters (dir : str , exclude_list : list [str ] = []) -> None :
99
123
for filename in glob .glob (f'{ dir } /*.adoc' ):
100
- if os .path .basename (filename ) in exclude_list :
101
- continue
102
124
scrape_cluster (filename )
103
125
104
- scrape_all_clusters (sdm_clusters_dir , sdm_exclude_list )
105
- scrape_all_clusters (app_clusters_dir , app_exclude_list )
106
- scrape_all_clusters (media_clusters_dir , media_exclude_list )
107
- for f in dm_clusters_list :
108
- filename = f'{ dm_clusters_dir } /{ f } '
109
- scrape_cluster (filename )
110
-
111
-
112
- def scrape_device_types (scraper , spec_root , output_dir , dry_run ):
126
+ scrape_all_clusters (dm_clusters_dir )
127
+ scrape_all_clusters (sdm_clusters_dir )
128
+ scrape_all_clusters (app_clusters_dir )
129
+ scrape_all_clusters (media_clusters_dir )
130
+
131
+ for xml_path in glob .glob (f'{ clusters_output_dir } /*.xml' ):
132
+ tree = ElementTree .parse (f'{ xml_path } ' )
133
+ root = tree .getroot ()
134
+ cluster = next (root .iter ('cluster' ))
135
+ # If there's no cluster ID table, this isn't a cluster
136
+ try :
137
+ next (cluster .iter ('clusterIds' ))
138
+ except StopIteration :
139
+ # If there's no cluster ID table, this isn't a cluster just some kind of intro adoc
140
+ print (f'Removing file { xml_path } as it does not include any cluster definitions' )
141
+ os .remove (xml_path )
142
+ continue
143
+ # For now, we're going to manually remove the word "Cluster" from the cluster name field
144
+ # to make the diff easier. The update to 1.2.4 of the scraper added this.
145
+ # TODO: submit a separate PR with JUST this change reverted and remove this code.
146
+ with open (xml_path , 'rb' ) as input :
147
+ xml_str = input .read ()
148
+
149
+ original_name = bytes (cluster .attrib ['name' ], 'utf-8' )
150
+ replacement_name = bytes (cluster .attrib ['name' ].removesuffix (" Cluster" ), 'utf-8' )
151
+ with open (xml_path , 'wb' ) as output :
152
+ output .write (xml_str .replace (original_name , replacement_name ))
153
+
154
+
155
+ def scrape_device_types (scraper , spec_root , output_dir , dry_run , include_in_progress ):
113
156
device_type_dir = os .path .abspath (
114
157
os .path .join (spec_root , 'src' , 'device_types' ))
115
158
device_types_output_dir = os .path .abspath (
@@ -119,9 +162,16 @@ def scrape_device_types(scraper, spec_root, output_dir, dry_run):
119
162
if not os .path .exists (device_types_output_dir ):
120
163
os .makedirs (device_types_output_dir )
121
164
165
+ print ('Generating device type library to get file include list - this make take a few minutes' )
166
+ device_type_output = make_asciidoc ('pdf-devicelibrary-book' , include_in_progress , spec_root , dry_run )
167
+
122
168
def scrape_device_type (filename : str ) -> None :
169
+ base = Path (filename ).stem
170
+ if base not in device_type_output :
171
+ print (f'skipping file: { filename } as it is not compiled into the asciidoc' )
172
+ return
123
173
xml_path = get_xml_path (filename , device_types_output_dir )
124
- cmd = [scraper , 'devicetype' , '-c' , clusters_output_dir ,
174
+ cmd = [scraper , 'devicetype' , '-c' , '-cls' , clusters_output_dir ,
125
175
'-nd' , '-i' , filename , '-o' , xml_path ]
126
176
if dry_run :
127
177
print (cmd )
@@ -187,7 +237,8 @@ def dump_cluster_ids(output_dir):
187
237
188
238
json_file = os .path .join (clusters_output_dir , 'cluster_ids.json' )
189
239
with open (json_file , "w" ) as outfile :
190
- json .dump (json_dict , outfile , indent = 2 )
240
+ json .dump (json_dict , outfile , indent = 4 )
241
+ outfile .write ('\n ' )
191
242
192
243
193
244
if __name__ == '__main__' :
0 commit comments