module rt.moduleinfo;
pragma(LDC_no_moduleinfo):

import rt.sections;
import lwdr.tracking;
import lwdr.internal.unionset;

// LWDR_ModuleCtors_MinHeap (re-sort and free the ctor lists around every run,
// trading CPU for peak heap) only makes sense when module ctors are enabled at all.
version(LWDR_ModuleCtors_MinHeap)
{
    version(LWDR_ModuleCtors) {}
    else static assert(0, "LWDR_ModuleCtors_MinHeap requires LWDR_ModuleCtors to be defined.");
}

version(LWDR_ModuleCtors):

// All ModuleInfos discovered via rt.sections.allModules (set once in
// __lwdr_moduleInfo_runModuleCtors and then read by sortCtors).
private __gshared immutable(ModuleInfo*)[] modules;
// Non-TLS module ctors/dtors in dependency order (run forward for ctors,
// reverse for dtors). Allocated by sortCtors, released by freeCtorLists.
private __gshared immutable(ModuleInfo)*[] ctors;
// TLS module ctors/dtors in dependency order; same lifetime as `ctors`.
private __gshared immutable(ModuleInfo)*[] tlsctors;

/// Snapshot rt.sections.allModules into a heap-allocated slice.
/// Iterates the range twice: once to count, once to copy — allModules is
/// presumably a cheap, repeatable range over the image's module sections.
/// The caller owns the returned allocation (never freed here).
/// NOTE(review): the allocation size uses size_t.sizeof per element, which
/// equals (ModuleInfo*).sizeof on the usual targets — confirm for any target
/// where pointers and size_t differ.
private immutable(ModuleInfo*)[] getModules() nothrow @nogc
out(result) {
    foreach(r; result)
        assert(r !is null);
}
do {
    size_t count;
    foreach(m; allModules)
        count++;

    immutable(ModuleInfo)*[] result = (cast(immutable(ModuleInfo)**)lwdrInternal_alloc(size_t.sizeof * count))[0..count];
    size_t i;
    foreach(m; allModules)
        result[i++] = m;
    return cast(immutable)result;
}

/// Topologically sort the modules' static ctors/dtors by import dependency and
/// fill the global `ctors` (MIctor|MIdtor) and `tlsctors` (MItlsctor|MItlsdtor)
/// lists. Aborts via assert(false) if a genuine ctor cycle is found.
/// Requires `modules` to be populated; no-op when it is empty.
void sortCtors() nothrow @nogc
{
    import core.bitop : bts, btr, bt, BitRange;

    immutable len = modules.length;
    if(len == 0)
        return; // nothing to do

    // Three bit sets of `len` bits each, kept for the whole sort:
    immutable nwords = (len + 8 * size_t.sizeof - 1) / (8 * size_t.sizeof);
    immutable flagbytes = nwords * size_t.sizeof;
    auto ctorstart = cast(size_t*)lwdrInternal_alloc(flagbytes); // ctor/dtor processing started (cycle sentinel)
    auto ctordone = cast(size_t*)lwdrInternal_alloc(flagbytes); // ctor/dtor fully processed
    auto relevant = cast(size_t*)lwdrInternal_alloc(flagbytes); // module has ctors/dtors of the kind being sorted
    scope(exit)
    {
        lwdrInternal_free(ctorstart);
        lwdrInternal_free(ctordone);
        lwdrInternal_free(relevant);
    }

    // Zero one of the bit sets above.
    void clearFlags(size_t* flags) @nogc nothrow
    {
        import util;
        memset(flags, 0, flagbytes);
    }

    // Build the import-dependency edges between modules once, up front, so the
    // traversal below works on small integer indices instead of pointer lookups.
    // edges[i] holds the indices of modules imported by modules[i].
    int[][] edges = (cast(int[]*)lwdrInternal_alloc((int[]).sizeof * modules.length))[0 .. modules.length];
    {
        // Map ModuleInfo address -> index in `modules`.
        LLUnionSet modIndexes = LLUnionSet(modules.length);
        foreach(i, m; modules)
            modIndexes[cast(size_t)m] = i;

        // Scratch bit set used to de-duplicate edges per module.
        auto reachable = cast(size_t*)lwdrInternal_alloc(flagbytes);
        scope(exit) lwdrInternal_free(reachable);

        foreach(i, m; modules)
        {
            clearFlags(reachable);
            // Worst case: a module imports every other module.
            int* edge = cast(int*)lwdrInternal_alloc(int.sizeof * modules.length);
            size_t nEdges = 0;
            foreach(imp; m.importedModules)
            {
                if(imp is m)
                    continue; // self-import
                // Imports outside `modules` (e.g. not in this image) are skipped.
                if(auto impidx = cast(size_t)imp in modIndexes)
                    if(!bts(reachable, *impidx)) // first time we see this target
                        edge[nEdges++] = *impidx;
                        // NOTE(review): stores a size_t index into an int slot —
                        // fine while module counts fit in int; confirm no
                        // truncation is possible on this target.
            }
            edges[i] = edge[0 .. nEdges];
            // cannot trim edges[i] — allocator has no realloc/shrink
        }
    }

    scope(exit)
    {
        foreach(e; edges)
            if(e.ptr)
                lwdrInternal_free(e.ptr);
        lwdrInternal_free(edges.ptr);
    }

    /++ Compute into `reachable` the set of modules transitively reachable from
        modules[idx], stopping at (but including) "relevant" modules and at
        modules already fully processed. Uses an explicit stack (one frame per
        module, so depth can never legitimately exceed `len`) to stay @nogc and
        bound stack usage. Asserts on a detected ctor cycle (a relevant module
        whose processing has started but not finished).
        Returns: always true in the current implementation; the bool result is
        kept so callers can treat a (historical) soft cycle error as failure. ++/
    bool findDeps(size_t idx, size_t* reachable) nothrow @nogc
    {
        static struct StackFrame
        {
            size_t curMod, curDep; // module index + position in its edge list
        }

        auto stack = cast(StackFrame*)lwdrInternal_alloc(StackFrame.sizeof * len);
        scope(exit) lwdrInternal_free(stack);
        auto stackTop = stack + len;
        auto sp = stack;
        sp.curMod = cast(int)idx; // NOTE(review): int cast into a size_t field looks vestigial — harmless but confirm
        sp.curDep = 0;

        clearFlags(reachable);
        bts(reachable, idx);

        for(;;)
        {
            auto m = modules[sp.curMod];
            if(sp.curDep >= edges[sp.curMod].length)
            {
                // Exhausted this module's edges: pop, or finish at the root.
                if(sp == stack) // finished the algorithm
                    break;
                --sp;
            }
            else
            {
                auto midx = edges[sp.curMod][sp.curDep];
                if(!bts(reachable, midx)) // not yet visited
                {
                    if(bt(relevant, midx))
                    {
                        // Relevant dependency: record it (already done via the
                        // bts above) but don't recurse — processMod handles it.
                        if(bt(ctorstart, midx))
                        {
                            // Its processing started higher up the call chain
                            // and hasn't finished: genuine ctor cycle.
                            assert(false, "Module cycle detected!");
                        }
                    }
                    else if(!bt(ctordone, midx))
                    {
                        // Trivial (no ctors) and not exhaustively processed:
                        // descend through it to find relevant modules behind it.
                        if(++sp >= stackTop)
                            assert(false, "Stack overflow on module dependency search!"); // can't happen: <= one frame per module
                        sp.curMod = midx;
                        sp.curDep = 0;
                        continue;
                    }
                }
            }
            // next dependency
            ++sp.curDep;
        }
        return true;
    }

    /++ Emit modules[curidx] into the output list c[0 .. ctoridx] after all of
        its relevant (ctor-bearing) dependencies. Recurses only into relevant
        dependencies; trivial ones are skipped over by findDeps. Marks
        everything reachable from curidx as done so later doSort iterations
        skip it.
        Returns: false to propagate a (historical) soft cycle failure from a
        recursive call — the current findDeps never produces one. ++/
    bool processMod(size_t curidx, immutable(ModuleInfo)** c, ref size_t ctoridx) nothrow @nogc
    {
        immutable ModuleInfo* current = modules[curidx];

        // First, determine which modules are reachable.
        auto reachable = cast(size_t*)lwdrInternal_alloc(flagbytes);
        scope(exit) lwdrInternal_free(reachable);
        if(!findDeps(curidx, reachable))
            return false; // deprecated cycle error

        // Process the relevant dependencies first so they land in the output
        // before this module. `ctorstart` marks us in-progress so a back edge
        // to us is detected as a cycle inside findDeps.
        bts(ctorstart, curidx);
        auto brange = BitRange(reachable, len); // struct range — each foreach iterates a fresh copy
        foreach(i; brange)
            // Don't treat an in-progress module as a cycle here (config may
            // ignore cycles); just don't recurse into one.
            if(i != curidx && bt(relevant, i) && !bt(ctordone, i) && !bt(ctorstart, i))
                if(!processMod(i, c, ctoridx))
                    return false; // deprecated cycle error

        // Mark this module and everything reachable from it as done.
        bts(ctordone, curidx);
        btr(ctorstart, curidx);
        foreach(i; brange)
            // Relevant dependencies are already done (or being finished up the
            // call stack); checking relevance here would be a wasted op.
            bts(ctordone, i);

        c[ctoridx++] = current;
        return true;
    }

    /++ Run the sort for one flag set (MIctor|MIdtor or MItlsctor|MItlsdtor)
        and store the ordered list into `result` (the caller-owned global).
        MIstandalone modules have no cross-module ctor dependencies and are
        emitted first, unordered.
        Returns: false on a (historical) soft cycle failure; `result` is only
        set on success with a non-empty list. ++/
    bool doSort(size_t relevantFlags, ref immutable(ModuleInfo)*[] result) nothrow @nogc
    {
        clearFlags(relevant);
        clearFlags(ctorstart);
        clearFlags(ctordone);

        // Pre-allocate enough space to hold all modules.
        immutable(ModuleInfo)** ctors = cast(immutable(ModuleInfo)**)lwdrInternal_alloc(len * (void*).sizeof);
        size_t ctoridx = 0;
        foreach(idx, m; modules)
        {
            if(m.flags & relevantFlags)
            {
                if(m.flags & MIstandalone)
                    ctors[ctoridx++] = m; // can run at any time. Just run it first.
                else
                    bts(relevant, idx);
            }
        }

        // Now run the algorithm on the relevant (order-dependent) ones.
        foreach(idx; BitRange(relevant, len))
        {
            if(!bt(ctordone, idx))
                if(!processMod(idx, ctors, ctoridx))
                    return false;
        }
        if(ctoridx == 0)
            // no ctors in the list — don't leak the scratch buffer
            lwdrInternal_free(ctors);
        else
            // cannot trim ctors :( — allocator has no shrink; freeCtorLists frees the full block
            result = ctors[0 .. ctoridx];
        return true;
    }

    if(!doSort(MIctor | MIdtor, ctors) || !doSort(MItlsctor | MItlsdtor, tlsctors))
        assert(false, "Module cycle deprecation 16211 warning!");
}

/// Release the sorted ctor lists produced by sortCtors and null the slices so
/// a later sortCtors can rebuild them (used by the MinHeap configuration).
private void freeCtorLists() nothrow @nogc
{
    if(ctors.ptr)
        lwdrInternal_free(ctors.ptr);
    ctors = null;
    if(tlsctors.ptr)
        lwdrInternal_free(tlsctors.ptr);
    tlsctors = null;
}

extern(C)
{
    /// Run all independent (ictor) ctors, then the dependency-sorted module
    /// ctors. Also performs the one-time module discovery and sort. Under
    /// MinHeap the lists are freed immediately after use and re-sorted on
    /// demand by the other entry points.
    void __lwdr_moduleInfo_runModuleCtors() nothrow @nogc
    {
        modules = getModules;

        sortCtors;
        version(LWDR_ModuleCtors_MinHeap)
            scope(exit)
                freeCtorLists;

        foreach(m; modules) // independent ctors — order-insensitive, run first
        {
            // must cast or won't call: strip the attributes off the stored pointer
            auto ct = cast(void function() nothrow @nogc)m.ictor;
            if(ct !is null) ct();
        }
        foreach(m; ctors) // sorted module ctors
        {
            auto ct = cast(void function() nothrow @nogc)m.ctor;
            if(ct !is null) ct();
        }
    }

    /// Run the dependency-sorted TLS ctors for the calling thread.
    /// MinHeap: rebuild the lists first (they were freed after the last run).
    void __lwdr_moduleInfo_runTlsCtors() nothrow @nogc
    {
        version(LWDR_ModuleCtors_MinHeap)
        {
            sortCtors;
            scope(exit)
                freeCtorLists;
        }

        foreach(m; tlsctors)
        {
            auto tlsct = cast(void function() nothrow @nogc)m.tlsctor;
            if(tlsct !is null) tlsct();
        }
    }

    /// Run the TLS dtors for the calling thread, in reverse of ctor order.
    void __lwdr_moduleInfo_runTlsDtors() nothrow @nogc
    {
        version(LWDR_ModuleCtors_MinHeap)
        {
            sortCtors;
            scope(exit)
                freeCtorLists;
        }

        foreach_reverse(m; tlsctors)
        {
            auto tlsdt = cast(void function() nothrow @nogc)m.tlsdtor;
            if(tlsdt !is null) tlsdt();
        }
    }

    /// Run the module dtors in reverse of ctor order, then release the lists
    /// (unconditionally — this is the final runtime shutdown step).
    void __lwdr_moduleInfo_runModuleDtors() nothrow @nogc
    {
        version(LWDR_ModuleCtors_MinHeap)
            sortCtors;

        scope(exit) freeCtorLists;

        foreach_reverse(m; ctors)
        {
            auto dt = cast(void function() nothrow @nogc)m.dtor;
            if(dt !is null) dt();
        }
    }
}