forked from bellwether/minerva
slightly changed the parallel algorithm
This commit is contained in:
parent dc5a3519a8
commit c4e0b71a98
1 changed file with 6 additions and 1 deletion
@@ -1,5 +1,10 @@
+import math
 from joblib import Parallel, delayed

+# If you have a list of 100 elements and want to process it with 8 cores,
+# it will split it into 8 chunks (7 chunks of 13, 1 chunk of 9). `func` is
+# then run serially on the contents within its own process
+#
 # Instead of taking each object in the list and giving it its own thread,
 # this splits the list into `cores` groups and gives each group its own
 # thread, where the group is now processed in series within its thread.
@@ -11,7 +16,7 @@ from joblib import Parallel, delayed
 # parallel_map(say, [str(i) for i in range(10)], cores=4)
 def parallel_map(func=None, data=None, cores=8):
     cores = min(cores, len(data))
-    size = len(data) // cores
+    size = math.ceil(len(data) / cores)
     groups = [data[i:i + size] for i in range(0, len(data), size)]

     def wrapper_func(fs):
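The one-line change controls how many groups get made. With floor division, 100 elements on 8 cores gives a chunk size of 12 and therefore 9 groups, one more group than there are workers; ceiling division gives a chunk size of 13 and exactly 8 groups. A quick standalone check of both behaviors (plain Python, no joblib needed; the numbers match the comment added in this commit):

import math

data = list(range(100))
cores = 8

# Before this commit: floor division undersizes the chunks, so the
# slicing loop produces more groups than there are cores.
old_size = len(data) // cores          # 12
old_groups = [data[i:i + old_size] for i in range(0, len(data), old_size)]
print(len(old_groups), [len(g) for g in old_groups])
# -> 9 groups: eight of 12 and one of 4

# After this commit: ceiling division caps the group count at `cores`.
new_size = math.ceil(len(data) / cores)    # 13
new_groups = [data[i:i + new_size] for i in range(0, len(data), new_size)]
print(len(new_groups), [len(g) for g in new_groups])
# -> 8 groups: seven of 13 and one of 9, as the comment says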
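For context, a minimal sketch of how the whole function might read after this commit. Only the signature, the `cores` clamp, the `size` and `groups` lines, and the `def wrapper_func(fs):` header appear in the diff; the wrapper body, the Parallel call, and the result flattening below are assumptions, not the commit's actual code:

import math
from joblib import Parallel, delayed

def parallel_map(func=None, data=None, cores=8):
    # Never ask for more workers than there are elements.
    cores = min(cores, len(data))
    # Ceiling division so the slicing below yields at most `cores` groups.
    size = math.ceil(len(data) / cores)
    groups = [data[i:i + size] for i in range(0, len(data), size)]

    def wrapper_func(fs):
        # Assumed body: apply `func` serially within one group, per the
        # comments added in this commit.
        return [func(f) for f in fs]

    # Assumed: one joblib job per group; joblib's default backend runs
    # jobs in separate worker processes. Flatten per-group results at the end.
    results = Parallel(n_jobs=cores)(delayed(wrapper_func)(g) for g in groups)
    return [item for group in results for item in group]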