# main.py (from USC-EE-250L-Spring-2023/lab-10)
import threading
import time
from typing import List, Optional

import numpy as np
import pandas as pd
import plotly.express as px
import requests


def generate_data() -> List[int]:
    """Generate 1000 random integers in the range [100, 10000)."""
    return np.random.randint(100, 10000, 1000).tolist()


def process1(data: List[int]) -> List[int]:
    """TODO: Document this function. What does it do? What are the inputs and outputs?"""
    def foo(x):
        """Find the next largest prime number."""
        while True:
            x += 1
            if all(x % i for i in range(2, x)):
                return x
    return [foo(x) for x in data]


def process2(data: List[int]) -> List[int]:
    """TODO: Document this function. What does it do? What are the inputs and outputs?"""
    def foo(x):
        """Find the next largest perfect square."""
        while True:
            x += 1
            if int(np.sqrt(x)) ** 2 == x:
                return x
    return [foo(x) for x in data]
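

# Quick sanity check (not part of the lab skeleton): process1 maps each value to
# the next prime greater than it, while process2 maps each value to the next
# perfect square greater than it, e.g. process1([10]) gives [11] and
# process2([10]) gives [16].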


def final_process(data1: List[int], data2: List[int]) -> float:
    """TODO: Document this function. What does it do? What are the inputs and outputs?"""
    return np.mean([x - y for x, y in zip(data1, data2)])


offload_url = 'http://192.168.4.74:5000'  # TODO: Change this to the IP address of your server


def run(offload: Optional[str] = None) -> float:
    """Run the program, offloading the specified function(s) to the server.

    Args:
        offload: Which function(s) to offload to the server. Can be None, 'process1', 'process2', or 'both'.

    Returns:
        float: the final result of the program.
    """
    data = generate_data()
    if offload is None:  # in this case, we run the program locally
        data1 = process1(data)
        data2 = process2(data)
    elif offload == 'process1':
        data1 = None

        def offload_process1(data):
            nonlocal data1
            # TODO: Send a POST request to the server with the input data
            # (a hedged sketch of such a request appears after run() below)
            data1 = response.json()

        thread = threading.Thread(target=offload_process1, args=(data,))
        thread.start()
        data2 = process2(data)
        thread.join()
        # Question 2: Why do we need to join the thread here?
        # Question 3: Are the processing functions executing in parallel or just concurrently? What is the difference?
        # See this article: https://oxylabs.io/blog/concurrency-vs-parallelism
        # ChatGPT is also good at explaining the difference between parallel and concurrent execution!
        # Make sure to cite any sources you use to answer this question.
    elif offload == 'process2':
        # TODO: Implement this case
        pass
    elif offload == 'both':
        # TODO: Implement this case
        pass
    ans = final_process(data1, data2)
    return ans
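

# A minimal sketch of the offloaded call, assuming the server exposes a POST
# endpoint per processing function (e.g. '/process1') that accepts the input
# list as JSON and returns the processed list as JSON. The endpoint path and
# payload layout are assumptions, not part of the lab skeleton; match them to
# whatever your server actually defines before using this inside run().
def _offload_sketch(data: List[int], endpoint: str = '/process1') -> List[int]:
    # Hypothetical helper: POST the data to the server and decode the JSON reply.
    response = requests.post(offload_url + endpoint, json=data)
    response.raise_for_status()  # surface HTTP errors instead of failing silently
    return response.json()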


def main():
    # TODO: Run the program 5 times for each offloading mode, and record the total execution time
    # Compute the mean and standard deviation of the execution times
    # Hint: store the results in a pandas DataFrame, use previous labs as a reference
    # TODO: Plot makespans (total execution time) as a bar chart with error bars
    # Make sure to include a title and x and y labels
    # TODO: save plot to "makespan.png"
    # (A hedged sketch of one possible approach appears after main() below.)
    # Question 4: What is the best offloading mode? Why do you think that is?
    # Question 5: What is the worst offloading mode? Why do you think that is?
    # Question 6: The processing functions in the example aren't very likely to be used in a real-world application.
    # What kind of processing functions would be more likely to be used in a real-world application?
    # When would you want to offload these functions to a server?
    pass  # TODO: replace this placeholder with your implementation
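

# A hedged sketch of one way main() could be structured, not a reference
# solution: it times each offloading mode over several runs, aggregates the
# makespans with pandas, and draws a plotly bar chart with error bars. It
# assumes the TODO branches in run() have been completed; column and file names
# are illustrative, and write_image() requires the optional 'kaleido' package.
def _benchmark_sketch(n_runs: int = 5) -> None:
    records = []
    for mode in [None, 'process1', 'process2', 'both']:
        for _ in range(n_runs):
            start = time.perf_counter()
            run(offload=mode)
            records.append({'mode': str(mode), 'makespan': time.perf_counter() - start})
    df = pd.DataFrame(records)
    summary = df.groupby('mode')['makespan'].agg(['mean', 'std']).reset_index()
    fig = px.bar(summary, x='mode', y='mean', error_y='std',
                 title='Makespan by offloading mode',
                 labels={'mode': 'Offloading mode', 'mean': 'Makespan (s)'})
    fig.write_image('makespan.png')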


if __name__ == '__main__':
    main()