@@ -6,12 +6,16 @@ use crate::clustering::metric::Metric;
6
6
use crate :: clustering:: transitions:: Decomp ;
7
7
use crate :: mccfr:: encoder:: Encoder ;
8
8
use crate :: mccfr:: profile:: Profile ;
9
+ use byteorder:: ReadBytesExt ;
10
+ use byteorder:: BE ;
9
11
use std:: fs:: File ;
10
12
use std:: io:: BufReader ;
11
13
use std:: io:: Read ;
12
14
use std:: io:: Seek ;
13
15
use std:: sync:: Arc ;
14
16
use tokio_postgres:: binary_copy:: BinaryCopyInWriter ;
17
+ use tokio_postgres:: types:: ToSql ;
18
+ use tokio_postgres:: types:: Type ;
15
19
use tokio_postgres:: Client ;
16
20
use tokio_postgres:: Error as E ;
17
21
@@ -28,59 +32,22 @@ impl Writer {
28
32
let postgres = Self ( crate :: db ( ) . await ) ;
29
33
postgres. upload :: < Metric > ( ) . await ?;
30
34
postgres. upload :: < Decomp > ( ) . await ?;
31
- postgres. upload :: < Encoder > ( ) . await ?;
35
+ postgres. upload :: < Encoder > ( ) . await ?; // Lookup ?
32
36
postgres. upload :: < Profile > ( ) . await ?;
33
37
postgres. derive :: < Abstraction > ( ) . await ?;
34
38
postgres. derive :: < Street > ( ) . await ?;
35
39
postgres. vacuum ( ) . await ?;
36
40
Ok ( ( ) )
37
41
}
38
42
39
- async fn upload < U > ( & self ) -> Result < ( ) , E >
40
- where
41
- U : Upload ,
42
- {
43
- let ref name = U :: name ( ) ;
44
- if self . has_rows ( name) . await ? {
45
- log:: info!( "tables data already uploaded ({})" , name) ;
46
- Ok ( ( ) )
47
- } else {
48
- log:: info!( "copying {}" , name) ;
49
- self . 0 . batch_execute ( & U :: prepare ( ) ) . await ?;
50
- self . 0 . batch_execute ( & U :: nuke ( ) ) . await ?;
51
- let sink = self . 0 . copy_in ( & U :: copy ( ) ) . await ?;
52
- let writer = BinaryCopyInWriter :: new ( sink, U :: columns ( ) ) ;
53
- futures:: pin_mut!( writer) ;
54
- let ref mut count = [ 0u8 ; 2 ] ;
55
- for ref mut reader in U :: sources ( )
56
- . iter ( )
57
- . map ( |s| File :: open ( s) . expect ( "file not found" ) )
58
- . map ( |f| BufReader :: new ( f) )
59
- {
60
- reader. seek ( std:: io:: SeekFrom :: Start ( 19 ) ) . unwrap ( ) ;
61
- while let Ok ( _) = reader. read_exact ( count) {
62
- match u16:: from_be_bytes ( count. clone ( ) ) {
63
- 0xFFFF => break ,
64
- length => {
65
- assert ! ( length == U :: columns( ) . len( ) as u16 ) ;
66
- let row = U :: read ( reader) ;
67
- let row = row. iter ( ) . map ( |b| & * * b) . collect :: < Vec < _ > > ( ) ;
68
- writer. as_mut ( ) . write ( & row) . await ?;
69
- }
70
- }
71
- }
72
- }
73
- writer. finish ( ) . await ?;
74
- self . 0 . batch_execute ( & U :: indices ( ) ) . await ?;
75
- Ok ( ( ) )
76
- }
77
- }
78
-
79
43
async fn derive < D > ( & self ) -> Result < ( ) , E >
80
44
where
81
45
D : Derive ,
82
46
{
83
47
let ref name = D :: name ( ) ;
48
+ // if !does_exist(name).await? {
49
+ // create
50
+ // }
84
51
if self . has_rows ( name) . await ? {
85
52
log:: info!( "tables data already uploaded ({})" , name) ;
86
53
Ok ( ( ) )
@@ -103,6 +70,73 @@ impl Writer {
103
70
}
104
71
}
105
72
73
+ async fn upload < U > ( & self ) -> Result < ( ) , E >
74
+ where
75
+ U : Upload ,
76
+ {
77
+ let ref name = U :: name ( ) ;
78
+ // if !does_exist(name).await? {
79
+ // create
80
+ // }
81
+ if self . has_rows ( name) . await ? {
82
+ log:: info!( "tables data already uploaded ({})" , name) ;
83
+ Ok ( ( ) )
84
+ } else {
85
+ log:: info!( "copying {}" , name) ;
86
+ self . prepare :: < U > ( ) . await ?;
87
+ self . stream :: < U > ( ) . await ?;
88
+ self . index :: < U > ( ) . await ?;
89
+ Ok ( ( ) )
90
+ }
91
+ }
92
+
93
+ async fn prepare < T > ( & self ) -> Result < ( ) , E >
94
+ where
95
+ T : Upload ,
96
+ {
97
+ self . 0 . batch_execute ( & T :: prepare ( ) ) . await ?;
98
+ self . 0 . batch_execute ( & T :: nuke ( ) ) . await ?;
99
+ Ok ( ( ) )
100
+ }
101
+
102
+ async fn index < T > ( & self ) -> Result < ( ) , E >
103
+ where
104
+ T : Upload ,
105
+ {
106
+ self . 0 . batch_execute ( & T :: indices ( ) ) . await ?;
107
+ Ok ( ( ) )
108
+ }
109
+
110
+ async fn stream < T > ( & self ) -> Result < ( ) , E >
111
+ where
112
+ T : Upload ,
113
+ {
114
+ let sink = self . 0 . copy_in ( & T :: copy ( ) ) . await ?;
115
+ let writer = BinaryCopyInWriter :: new ( sink, T :: columns ( ) ) ;
116
+ futures:: pin_mut!( writer) ;
117
+ let ref mut count = [ 0u8 ; 2 ] ;
118
+ for ref mut reader in T :: sources ( )
119
+ . iter ( )
120
+ . map ( |s| File :: open ( s) . expect ( "file not found" ) )
121
+ . map ( |f| BufReader :: new ( f) )
122
+ {
123
+ reader. seek ( std:: io:: SeekFrom :: Start ( 19 ) ) . unwrap ( ) ;
124
+ while let Ok ( _) = reader. read_exact ( count) {
125
+ match u16:: from_be_bytes ( count. clone ( ) ) {
126
+ 0xFFFF => break ,
127
+ length => {
128
+ assert ! ( length == T :: columns( ) . len( ) as u16 ) ;
129
+ let row = Self :: read :: < T > ( reader) ;
130
+ let row = row. iter ( ) . map ( |b| & * * b) . collect :: < Vec < _ > > ( ) ;
131
+ writer. as_mut ( ) . write ( & row) . await ?;
132
+ }
133
+ }
134
+ }
135
+ }
136
+ writer. finish ( ) . await ?;
137
+ Ok ( ( ) )
138
+ }
139
+
106
140
/// Reclaims dead tuples and refreshes planner statistics for the whole
/// database after the bulk loads complete.
async fn vacuum(&self) -> Result<(), E> {
    let sql = "VACUUM ANALYZE;";
    self.0.batch_execute(sql).await
}
@@ -116,7 +150,7 @@ impl Writer {
116
150
" ,
117
151
table
118
152
) ;
119
- Ok ( 0 != self . 0 . query_one ( sql, & [ ] ) . await ?. get :: < _ , i64 > ( 0 ) )
153
+ Ok ( ! self . 0 . query ( sql, & [ ] ) . await ?. is_empty ( ) )
120
154
} else {
121
155
Ok ( false )
122
156
}
@@ -130,6 +164,27 @@ impl Writer {
130
164
" ,
131
165
table
132
166
) ;
133
- Ok ( 1 == self . 0 . query_one ( sql, & [ ] ) . await ?. get :: < _ , i64 > ( 0 ) )
167
+ Ok ( !self . 0 . query ( sql, & [ ] ) . await ?. is_empty ( ) )
168
+ }
169
+
170
+ fn read < T > ( reader : & mut BufReader < File > ) -> Vec < Box < dyn ToSql + Sync > >
171
+ where
172
+ T : Upload ,
173
+ {
174
+ T :: columns ( )
175
+ . iter ( )
176
+ . cloned ( )
177
+ . map ( |ty| match ty {
178
+ Type :: FLOAT4 => {
179
+ assert ! ( reader. read_u32:: <BE >( ) . expect( "length" ) == 4 ) ;
180
+ Box :: new ( reader. read_f32 :: < BE > ( ) . expect ( "data" ) ) as Box < dyn ToSql + Sync >
181
+ }
182
+ Type :: INT8 => {
183
+ assert ! ( reader. read_u32:: <BE >( ) . expect( "length" ) == 8 ) ;
184
+ Box :: new ( reader. read_i64 :: < BE > ( ) . expect ( "data" ) ) as Box < dyn ToSql + Sync >
185
+ }
186
+ _ => panic ! ( "unsupported type: {}" , ty) ,
187
+ } )
188
+ . collect :: < Vec < Box < dyn ToSql + Sync > > > ( )
134
189
}
135
190
}
0 commit comments